diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 9fbe05c093..0000000000 --- a/.coveragerc +++ /dev/null @@ -1,25 +0,0 @@ -[run] -branch = True -source = pymongo,bson,gridfs - -[paths] -source= - . - /data/mci/*/src - -[report] -exclude_lines = - if (.*and +)*_use_c( and.*)*: - def has_c - def get_version_string - ^except AttributeError: - except ImportError: - raise NotImplementedError - return NotImplemented - _use_c = True - if __name__ == '__main__': -partial_branches = - if (.*and +)*not _use_c( and.*)*: - -[html] -directory = htmlcov diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh new file mode 100755 index 0000000000..4e8be8cf58 --- /dev/null +++ b/.evergreen/build-mac.sh @@ -0,0 +1,30 @@ +#!/bin/bash -ex + +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true + +VERSION=${VERSION:-3.10} + +PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 +rm -rf build + +createvirtualenv $PYTHON releasevenv +python -m pip install build +python -m build --wheel . +deactivate || true +rm -rf releasevenv + +# Test that each wheel is installable. +for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ +done + +mv validdist/* dist +rm -rf validdist +ls dist diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh new file mode 100755 index 0000000000..9943a983c4 --- /dev/null +++ b/.evergreen/build-manylinux-internal.sh @@ -0,0 +1,42 @@ +#!/bin/bash -ex +cd /src + +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true + +# Compile wheels +for PYTHON in /opt/python/*/bin/python; do + if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310|cp311|cp312) ]]; then + continue + fi + # https://github.com/pypa/manylinux/issues/49 + rm -rf build + $PYTHON -m pip install build + $PYTHON -m build --wheel . + rm -rf build + + # Audit wheels and write manylinux tag + for whl in dist/*.whl; do + # Skip already built manylinux wheels. + if [[ "$whl" != *"manylinux"* ]]; then + auditwheel repair $whl -w dist + rm $whl + fi + done + + # Test that each wheel is installable. + # Test without virtualenv because it's not present on manylinux containers. + for release in dist/*; do + testinstall $PYTHON $release "without-virtualenv" + mv $release validdist/ + done +done + +mv validdist/* dist +rm -rf validdist +ls dist diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh new file mode 100755 index 0000000000..11cf1dd231 --- /dev/null +++ b/.evergreen/build-manylinux.sh @@ -0,0 +1,51 @@ +#!/bin/bash -ex + +docker version + +# Set up qemu support using the method used in docker/setup-qemu-action +# https://github.com/docker/setup-qemu-action/blob/2b82ce82d56a2a04d2637cd93a637ae1b359c0a7/README.md?plain=1#L46 +docker run --rm --privileged tonistiigi/binfmt:latest --install all + +# manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were +# the last releases to generate pip < 20.3 compatible wheels. After that +# auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels +# which requires pip >= 20.3. We use the older docker image to support older +# pip versions. 
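+# Passing any argument to this script selects those pinned tags (the value is
+# only checked for non-emptiness below); with no argument the latest manylinux
+# images are pulled instead.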
+BUILD_WITH_TAG="$1" +if [ -n "$BUILD_WITH_TAG" ]; then + images=(quay.io/pypa/manylinux1_x86_64:2021-05-05-b64d921 \ + quay.io/pypa/manylinux1_i686:2021-05-05-b64d921 \ + quay.io/pypa/manylinux2014_x86_64:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_i686:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_aarch64:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_ppc64le:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_s390x:2021-05-05-1ac6ef3) +else + images=(quay.io/pypa/manylinux1_x86_64 \ + quay.io/pypa/manylinux1_i686 \ + quay.io/pypa/manylinux2014_x86_64 \ + quay.io/pypa/manylinux2014_i686 \ + quay.io/pypa/manylinux2014_aarch64 \ + quay.io/pypa/manylinux2014_ppc64le \ + quay.io/pypa/manylinux2014_s390x) +fi + +for image in "${images[@]}"; do + docker pull $image + docker run --rm -v "`pwd`:/src" $image /src/.evergreen/build-manylinux-internal.sh +done + +ls dist + +# Check for any unexpected files. +unexpected=$(find dist \! \( -iname dist -or \ + -iname '*cp37*' -or \ + -iname '*cp38*' -or \ + -iname '*cp39*' -or \ + -iname '*cp310*' -or \ + -iname '*cp311*' -or \ + -iname '*cp312*' \)) +if [ -n "$unexpected" ]; then + echo "Unexpected files:" $unexpected + exit 1 +fi diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh new file mode 100755 index 0000000000..a3ed0c2f19 --- /dev/null +++ b/.evergreen/build-windows.sh @@ -0,0 +1,29 @@ +#!/bin/bash -ex + +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true + +for VERSION in 37 38 39 310 311 312; do + _pythons=("C:/Python/Python${VERSION}/python.exe" \ + "C:/Python/32/Python${VERSION}/python.exe") + for PYTHON in "${_pythons[@]}"; do + rm -rf build + $PYTHON -m pip install build + $PYTHON -m build --wheel . + + # Test that each wheel is installable. + for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ + done + done +done + +mv validdist/* dist +rm -rf validdist +ls dist diff --git a/.evergreen/check-c-extensions.sh b/.evergreen/check-c-extensions.sh new file mode 100755 index 0000000000..cb51ceed4a --- /dev/null +++ b/.evergreen/check-c-extensions.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# C_EXTENSIONS Pass --no_ext to skip installing the C extensions. + +PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") +if [ -z "$C_EXTENSIONS" ] && [ "$PYTHON_IMPL" = "CPython" ]; then + PYMONGO_C_EXT_MUST_BUILD=1 python setup.py build_ext -i + python tools/fail_if_no_c.py +fi diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh new file mode 100644 index 0000000000..f4aa3c29af --- /dev/null +++ b/.evergreen/combine-coverage.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Coverage combine merges (and removes) all the coverage files and +# generates a new .coverage file in the current directory. + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +. .evergreen/utils.sh + +if [ -z "$PYTHON_BINARY" ]; then + PYTHON_BINARY=$(find_python3) +fi + +createvirtualenv "$PYTHON_BINARY" covenv +# coverage 7.3 dropped support for Python 3.7, keep in sync with run-tests.sh +# coverage >=5 is needed for relative_files=true. 
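+# Install into the fresh virtualenv created above so the system Python is
+# left untouched.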
+pip install -q "coverage>=5,<7.3" + +pip list +ls -la coverage/ + +python -m coverage combine coverage/coverage.* +python -m coverage html -d htmlcov diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4220e5d736..74b3dd0826 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -12,10 +12,11 @@ stepback: true # Actual testing tasks are marked with `type: test` command_type: system -# Protect ourself against rogue test case, or curl gone wild, that runs forever +# Protect ourselves against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 30 minutes is the longest we'll ever run +exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily + # for macos hosts) # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: @@ -38,6 +39,7 @@ functions: params: working_dir: "src" script: | + set +x # Get the current unique version of this checkout if [ "${is_patch}" = "true" ]; then CURRENT_VERSION=$(git describe)-patch-${version_id} @@ -67,6 +69,7 @@ functions: PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" PREPARE_SHELL: | set -o errexit + export SKIP_LEGACY_SHELL=1 export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" @@ -76,9 +79,8 @@ functions: export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export PATH="$MONGODB_BINARIES:$PATH" export PROJECT="${project}" + export PIP_QUIET=1 EOT - # See what we've done - cat expansion.yml # Load the expansion file to make an evergreen variable with the current unique version - command: expansions.update @@ -89,14 +91,14 @@ functions: - command: shell.exec params: script: | - set -o xtrace ${PREPARE_SHELL} + set -o xtrace rm -rf $DRIVERS_TOOLS if [ "${project}" = "drivers-tools" ]; then # If this was a patch build, doing a fresh clone would not actually test the patch cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS else - git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config @@ -129,13 +131,8 @@ functions: params: working_dir: "src" script: | - set -o xtrace ${PREPARE_SHELL} - # Coverage combine merges (and removes) all the coverage files and - # generates a new .coverage file in the current directory. - ls -la coverage/ - /opt/python/3.6/bin/python3 -m coverage combine coverage/coverage.* - /opt/python/3.6/bin/python3 -m coverage html -d htmlcov + bash .evergreen/combine-coverage.sh # Upload the resulting html coverage report. - command: shell.exec params: @@ -162,9 +159,12 @@ functions: - command: shell.exec params: script: | - set -o xtrace ${PREPARE_SHELL} - find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + set -o xtrace + mkdir out_dir + find $MONGO_ORCHESTRATION_HOME -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; + tar zcvf mongodb-logs.tar.gz -C out_dir/ . 
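+          # Prefixing each log with its parent directory name keeps logs from
+          # different mongod/mongos instances distinct in the flat archive.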
+ rm -rf out_dir - command: archive.targz_pack params: target: "mongo-coredumps.tgz" @@ -253,6 +253,7 @@ functions: - command: shell.exec params: script: | + ${PREPARE_SHELL} set -o xtrace # Enable core dumps if enabled on the machine @@ -274,7 +275,7 @@ functions: fi fi - if [ $(uname -s) == "Darwin" ]; then + if [ $(uname -s) = "Darwin" ]; then core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then echo "Enabling coredumps" @@ -282,14 +283,20 @@ functions: fi fi - ${PREPARE_SHELL} + if [ -n "${skip_crypt_shared}" ]; then + export SKIP_CRYPT_SHARED=1 + fi + MONGODB_VERSION=${VERSION} \ TOPOLOGY=${TOPOLOGY} \ AUTH=${AUTH} \ SSL=${SSL} \ STORAGE_ENGINE=${STORAGE_ENGINE} \ DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ - sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ + REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ + LOAD_BALANCER=${LOAD_BALANCER} \ + bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -300,13 +307,32 @@ functions: - key: MONGODB_STARTED value: "1" - "stop mongo-orchestration": + "bootstrap data lake": + - command: shell.exec + type: setup + params: + script: | + ${PREPARE_SHELL} + set -o xtrace + # The mongohouse build script needs to be passed the VARIANT variable, see + # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 + VARIANT=rhel84-small bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec + type: setup params: + background: true script: | + ${PREPARE_SHELL} set -o xtrace + bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh + + "stop mongo-orchestration": + - command: shell.exec + params: + script: | ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + set -o xtrace + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run mod_wsgi tests": - command: shell.exec @@ -314,9 +340,11 @@ functions: params: working_dir: "src" script: | - set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh + set -o xtrace + PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} \ + MOD_WSGI_EMBEDDED=${MOD_WSGI_EMBEDDED} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} \ + bash ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh "run mockupdb tests": - command: shell.exec @@ -324,19 +352,10 @@ functions: params: working_dir: "src" script: | - set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh - - "run cdecimal tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-cdecimal-tests.sh + set -o xtrace + export PYTHON_BINARY=${PYTHON_BINARY} + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-mockupdb "run doctests": - command: shell.exec @@ -344,11 +363,54 @@ functions: params: working_dir: "src" script: | - set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh + set -o xtrace + PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m doc-test "run tests": + # If testing FLE, start the KMS 
mock servers, first create the virtualenv. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate-kmstlsvenv.sh + fi + # Run in the background so the mock servers don't block the EVG task. + - command: shell.exec + params: + background: true + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate-kmstlsvenv.sh + # The -u options forces the stdout and stderr streams to be unbuffered. + # TMPDIR is required to avoid "AF_UNIX path too long" errors. + TMPDIR="$(dirname $DRIVERS_TOOLS)" python -u kms_kmip_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 5698 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & + fi + # Wait up to 10 seconds for the KMIP server to start. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate-kmstlsvenv.sh + for i in $(seq 1 1 10); do + sleep 1 + if python -u kms_kmip_client.py; then + echo 'KMS KMIP server started!' + exit 0 + fi + done + echo 'Failed to start KMIP server!' + exit 1 + fi - command: shell.exec type: test params: @@ -356,9 +418,18 @@ functions: working_dir: "src" script: | if [ -n "${test_encryption}" ]; then - cat < fle_aws_creds.sh + cat < fle_creds.sh export FLE_AWS_KEY="${fle_aws_key}" export FLE_AWS_SECRET="${fle_aws_secret}" + export FLE_AZURE_CLIENTID="${fle_azure_clientid}" + export FLE_AZURE_TENANTID="${fle_azure_tenantid}" + export FLE_AZURE_CLIENTSECRET="${fle_azure_clientsecret}" + export FLE_GCP_EMAIL="${fle_gcp_email}" + export FLE_GCP_PRIVATEKEY="${fle_gcp_privatekey}" + # Needed for generating temporary aws credentials. + export AWS_ACCESS_KEY_ID="${fle_aws_key}" + export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" + export AWS_DEFAULT_REGION=us-east-1 EOT fi - command: shell.exec @@ -366,13 +437,11 @@ functions: params: working_dir: "src" script: | - if [ -n "${set_xtrace_on}" ]; then - set -o xtrace - export SET_XTRACE_ON="${set_xtrace_on}" - fi + # Disable xtrace + set +x ${PREPARE_SHELL} if [ -n "${MONGODB_STARTED}" ]; then - export PYMONGO_MUST_CONNECT=1 + export PYMONGO_MUST_CONNECT=true fi if [ -n "${DISABLE_TEST_COMMANDS}" ]; then export PYMONGO_DISABLE_TEST_COMMANDS=1 @@ -380,57 +449,230 @@ functions: if [ -n "${test_encryption}" ]; then # Disable xtrace (just in case it was accidentally set). set +x - . ./fle_aws_creds.sh - rm -f ./fle_aws_creds.sh + . 
./fle_creds.sh + rm -f ./fle_creds.sh export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 + if [ -n "${test_encryption_pyopenssl}" ]; then + export TEST_ENCRYPTION_PYOPENSSL=1 + fi + fi + if [ -n "${test_crypt_shared}" ]; then + export TEST_CRYPT_SHARED=1 + export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} + fi + if [ -n "${test_pyopenssl}" ]; then + export TEST_PYOPENSSL=1 fi if [ -n "${SETDEFAULTENCODING}" ]; then export SETDEFAULTENCODING="${SETDEFAULTENCODING}" fi + if [ -n "${test_loadbalancer}" ]; then + export TEST_LOADBALANCER=1 + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" + fi + if [ -n "${test_serverless}" ]; then + export TEST_SERVERLESS=1 + export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" + export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" + export MONGODB_URI="${SERVERLESS_URI}" + export SINGLE_MONGOS_LB_URI="${MONGODB_URI}" + export MULTI_MONGOS_LB_URI="${MONGODB_URI}" + fi + if [ -n "${TEST_INDEX_MANAGEMENT}" ]; then + export TEST_INDEX_MANAGEMENT=1 + export MONGODB_URI="${TEST_INDEX_URI}" + export DB_USER="${DRIVERS_ATLAS_LAMBDA_USER}" + export DB_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" + fi - PYTHON_BINARY=${PYTHON_BINARY} \ - GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ + GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ + PYTHON_BINARY=${PYTHON_BINARY} \ C_EXTENSIONS=${C_EXTENSIONS} \ COVERAGE=${COVERAGE} \ COMPRESSORS=${COMPRESSORS} \ AUTH=${AUTH} \ SSL=${SSL} \ - sh ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh + TEST_DATA_LAKE=${TEST_DATA_LAKE} \ + MONGODB_API_VERSION=${MONGODB_API_VERSION} \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg "run enterprise auth tests": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} - command: shell.exec type: test params: - silent: true working_dir: "src" script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh + # Disable xtrace for security reasons (just in case it was accidentally set). + set +x + + DRIVERS_TOOLS="${DRIVERS_TOOLS}" \ + AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ + AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ + AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN}" \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m aws-secrets -- drivers/enterprise_auth + + PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ + PYTHON_BINARY="${PYTHON_BINARY}" \ + TEST_ENTERPRISE_AUTH=1 \ + AUTH=auth \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg "run atlas tests": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} - command: shell.exec type: test params: - silent: true + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + working_dir: "src" + script: | + # Disable xtrace for security reasons (just in case it was accidentally set). 
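+          # The assumed-role AWS credentials and the Atlas connect secrets
+          # fetched below must never be echoed to the task log.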
+ set +x + set -o errexit + + DRIVERS_TOOLS="${DRIVERS_TOOLS}" \ + AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \ + AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \ + AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN}" \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m aws-secrets -- drivers/atlas_connect + + PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ + PYTHON_BINARY="${PYTHON_BINARY}" \ + TEST_ATLAS=1 \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg + + "get aws auth secrets": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: shell.exec + type: test + params: + add_expansions_to_env: true + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd $DRIVERS_TOOLS/.evergreen/auth_aws + ./setup_secrets.sh drivers/aws_auth + + "run aws auth test with regular aws credentials": + - command: shell.exec + type: test + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh regular + + "run aws auth test with assume role credentials": + - command: shell.exec + type: test + params: + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh assume-role + + "run aws auth test with aws EC2 credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 + fi + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh ec2 + + "run aws auth test with aws web identity credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + ${PREPARE_SHELL} + # Test with and without AWS_ROLE_SESSION_NAME set. + .evergreen/run-mongodb-aws-test.sh web-identity + AWS_ROLE_SESSION_NAME="test" \ + .evergreen/run-mongodb-aws-test.sh web-identity + + "run oidc auth test with aws credentials": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + script: | + ${PREPARE_SHELL} + bash .evergreen/run-mongodb-oidc-test.sh + + "run aws auth test with aws credentials as environment variables": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh env-creds + + "run aws auth test with aws credentials and session token as environment variables": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh session-creds + + "run aws ECS auth test": + - command: shell.exec + type: test + params: + shell: "bash" working_dir: "src" script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} ATLAS_REPL='${atlas_repl}' ATLAS_SHRD='${atlas_shrd}' ATLAS_FREE='${atlas_free}' ATLAS_TLS11='${atlas_tls11}' ATLAS_TLS12='${atlas_tls12}' sh ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh + if [ "${skip_ECS_auth_test}" = "true" ]; then + echo "This platform does not support the ECS auth test, skipping..." 
+ exit 0 + fi + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh session-creds "cleanup": - command: shell.exec params: script: | - set -o xtrace ${PREPARE_SHELL} rm -rf $DRIVERS_TOOLS || true + rm -f ./secrets-export.sh || true "fix absolute paths": - command: shell.exec params: script: | - set -o xtrace + set +x ${PREPARE_SHELL} for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename @@ -440,7 +682,7 @@ functions: - command: shell.exec params: script: | - set -o xtrace + set +x ${PREPARE_SHELL} for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do cat $i | tr -d '\r' > $i.new @@ -453,7 +695,7 @@ functions: - command: shell.exec params: script: | - set -o xtrace + set +x ${PREPARE_SHELL} for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do chmod +x $i @@ -463,7 +705,7 @@ functions: - command: shell.exec params: script: | - set -o xtrace + set +x ${PREPARE_SHELL} echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' > ${PROJECT_DIRECTORY}/test-results.json @@ -472,11 +714,199 @@ functions: params: working_dir: "src" script: | - set -o xtrace ${PREPARE_SHELL} + set -o xtrace file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && sh $file || echo "$file not available, skipping" + [ -f "$file" ] && bash $file || echo "$file not available, skipping" + + "run-ocsp-test": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + TEST_OCSP=1 \ + PYTHON_BINARY=${PYTHON_BINARY} \ + CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ + OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ + bash ${PROJECT_DIRECTORY}/.evergreen/tox.sh -m test-eg + + run-valid-ocsp-server: + - command: shell.exec + params: + background: true + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + . ./activate-ocspvenv.sh + python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ + -p 8100 -v + run-revoked-ocsp-server: + - command: shell.exec + params: + background: true + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + . ./activate-ocspvenv.sh + python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ + -p 8100 \ + -v \ + --fault revoked + run-valid-delegate-ocsp-server: + - command: shell.exec + params: + background: true + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + . ./activate-ocspvenv.sh + python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ + -p 8100 -v + run-revoked-delegate-ocsp-server: + - command: shell.exec + params: + background: true + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + . 
./activate-ocspvenv.sh + python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ + -p 8100 \ + -v \ + --fault revoked + + "run load-balancer": + - command: shell.exec + params: + script: | + DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh start + - command: expansions.update + params: + file: lb-expansion.yml + + "stop load-balancer": + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen + DRIVERS_TOOLS=${DRIVERS_TOOLS} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop + + "teardown_docker": + - command: shell.exec + params: + script: | + # Remove all Docker images + DOCKER=$(command -v docker) || true + if [ -n "$DOCKER" ]; then + docker rmi -f $(docker images -a -q) &> /dev/null || true + fi + + "teardown_aws": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" + if [ -f "./aws_e2e_setup.json" ]; then + . ./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py + fi + + "build release": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + set -o xtrace + VERSION=${VERSION} ENSURE_UNIVERSAL2=${ENSURE_UNIVERSAL2} .evergreen/release.sh + + "upload release": + - command: archive.targz_pack + params: + target: "release-files.tgz" + source_dir: "src/dist" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files.tgz + remote_file: ${UPLOAD_BUCKET}/release/${revision}/${task_id}-${execution}-release-files.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files + + "download and merge releases": + - command: shell.exec + params: + silent: true + script: | + export AWS_ACCESS_KEY_ID=${aws_key} + export AWS_SECRET_ACCESS_KEY=${aws_secret} + + # Download all the task coverage files. + aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/release/${revision}/ release/ + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + set -o xtrace + # Combine releases into one directory. + ls -la release/ + mkdir releases + # Copy old manylinux release first since we want the newer manylinux + # wheels to override them. + mkdir old_manylinux + if mv release/*old_manylinux* old_manylinux; then + for REL in old_manylinux/*; do + tar zxvf $REL -C releases/ + done + fi + for REL in release/*; do + tar zxvf $REL -C releases/ + done + # Build source distribution. + cd src/ + /opt/python/3.7/bin/python3 -m pip install build + /opt/python/3.7/bin/python3 -m build --sdist . 
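+          # The sdist is written to dist/; stage it alongside the merged wheels.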
+ cp dist/* ../releases + - command: archive.targz_pack + params: + target: "release-files-all.tgz" + source_dir: "releases/" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files-all.tgz + remote_file: ${UPLOAD_BUCKET}/release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files all pre: - func: "fetch source" @@ -494,11 +924,194 @@ post: - func: "upload mo artifacts" - func: "upload test results" - func: "stop mongo-orchestration" + - func: "teardown_aws" - func: "cleanup" + - func: "teardown_docker" + +task_groups: + - name: serverless_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: "fetch source" + - func: "prepare resources" + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + set +o xtrace + LOADBALANCED=ON \ + SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh + - command: expansions.update + params: + file: serverless-expansion.yml + teardown_group: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + set +o xtrace + SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ + bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh + - func: "upload test results" + tasks: + - ".serverless" + + - name: testgcpkms_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + echo '${testgcpkms_key_file}' > /tmp/testgcpkms_key_file.json + export GCPKMS_KEYFILE=/tmp/testgcpkms_key_file.json + export GCPKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS + export GCPKMS_SERVICEACCOUNT="${testgcpkms_service_account}" + export GCPKMS_MACHINETYPE="e2-standard-4" + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/create-and-setup-instance.sh + # Load the GCPKMS_GCLOUD, GCPKMS_INSTANCE, GCPKMS_REGION, and GCPKMS_ZONE expansions. + - command: expansions.update + params: + file: testgcpkms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/delete-instance.sh + - func: "upload test results" + tasks: + - testgcpkms-task + + - name: testazurekms_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + shell: bash + script: |- + ${PREPARE_SHELL} + # Get azurekms credentials from the vault. 
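+          # setup_secrets.sh writes them to secrets-export.sh, which is sourced
+          # on the next line.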
+ bash $DRIVERS_TOOLS/.evergreen/auth_aws/setup_secrets.sh drivers/azurekms + source ./secrets-export.sh + export AZUREKMS_VMNAME_PREFIX="PYTHON_DRIVER" + export AZUREKMS_DRIVERS_TOOLS="$DRIVERS_TOOLS" + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/create-and-setup-vm.sh + - command: expansions.update + params: + file: testazurekms-expansions.yml + teardown_group: + # Load expansions again. The setup task may have failed before running `expansions.update`. + - command: expansions.update + params: + file: testazurekms-expansions.yml + - command: shell.exec + params: + shell: bash + script: |- + ${PREPARE_SHELL} + set -x + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} + export AZUREKMS_SCOPE=${AZUREKMS_SCOPE} + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/delete-vm.sh + - func: "upload test results" + setup_group_can_fail_task: true + teardown_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - testazurekms-task + + - name: test_aws_lambda_task_group + setup_group: + - func: fetch source + - func: prepare resources + - command: subprocess.exec + params: + working_dir: src + binary: bash + add_expansions_to_env: true + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/atlas-expansion.yml + teardown_task: + - command: subprocess.exec + params: + working_dir: src + binary: bash + add_expansions_to_env: true + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - test-aws-lambda-deployed + + - name: test_atlas_task_group_search_indexes + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: subprocess.exec + params: + working_dir: src + binary: bash + add_expansions_to_env: true + env: + MONGODB_VERSION: "7.0" + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/atlas-expansion.yml + - command: shell.exec + params: + working_dir: src + shell: bash + script: |- + echo "TEST_INDEX_URI: ${MONGODB_URI}" > atlas-expansion.yml + - command: expansions.update + params: + file: src/atlas-expansion.yml + teardown_task: + - command: subprocess.exec + params: + working_dir: src + binary: bash + add_expansions_to_env: true + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - test-search-index-helpers tasks: - - # Wildcard task. Do you need to find out what tools are available and where? 
# Throw it here, and execute this task on all buildvariants - name: getdata @@ -528,6 +1141,81 @@ tasks: genhtml --version || true valgrind --version || true + - name: "release-mac-1100" + tags: ["release_tag"] + run_on: macos-1100 + commands: + - func: "build release" + vars: + VERSION: "3.12" + ENSURE_UNIVERSAL2: "1" + - func: "build release" + vars: + VERSION: "3.11" + ENSURE_UNIVERSAL2: "1" + - func: "build release" + vars: + VERSION: "3.10" + ENSURE_UNIVERSAL2: "1" + - func: "build release" + vars: + VERSION: "3.9" + ENSURE_UNIVERSAL2: "1" + - func: "upload release" + - func: "build release" + vars: + VERSION: "3.8" + - func: "upload release" + + - name: "release-mac-1014" + tags: ["release_tag"] + run_on: macos-1014 + commands: + - func: "build release" + vars: + VERSION: "3.7" + - func: "upload release" + + - name: "release-windows" + tags: ["release_tag"] + run_on: windows-64-vsMulti-small + commands: + - func: "build release" + - func: "upload release" + + - name: "release-manylinux" + tags: ["release_tag"] + run_on: ubuntu2204-large + exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). + commands: + - func: "build release" + - func: "upload release" + + - name: "release-old-manylinux" + tags: ["release_tag"] + run_on: ubuntu2204-large + exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). + commands: + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + set -o xtrace + .evergreen/build-manylinux.sh BUILD_WITH_TAG + - func: "upload release" + + - name: "release-combine" + tags: ["release_tag"] + run_on: rhel84-small + depends_on: + - name: "*" + variant: ".release_tag" + patch_optional: true + commands: + - func: "download and merge releases" + # Standard test tasks {{{ - name: "mockupdb" @@ -544,192 +1232,192 @@ tasks: TOPOLOGY: "server" - func: "run doctests" - - name: "test-2.6-standalone" - tags: ["2.6", "standalone"] + - name: "test-3.6-standalone" + tags: ["3.6", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "2.6" + VERSION: "3.6" TOPOLOGY: "server" - func: "run tests" - - name: "test-2.6-replica_set" - tags: ["2.6", "replica_set"] + - name: "test-3.6-replica_set" + tags: ["3.6", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "2.6" + VERSION: "3.6" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-2.6-sharded_cluster" - tags: ["2.6", "sharded_cluster"] + - name: "test-3.6-sharded_cluster" + tags: ["3.6", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "2.6" + VERSION: "3.6" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-3.0-standalone" - tags: ["3.0", "standalone"] + - name: "test-4.0-standalone" + tags: ["4.0", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.0" + VERSION: "4.0" TOPOLOGY: "server" - func: "run tests" - - name: "test-3.0-replica_set" - tags: ["3.0", "replica_set"] + - name: "test-4.0-replica_set" + tags: ["4.0", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.0" + VERSION: "4.0" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-3.0-sharded_cluster" - tags: ["3.0", "sharded_cluster"] + - name: "test-4.0-sharded_cluster" + tags: ["4.0", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.0" + VERSION: "4.0" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-3.2-standalone" - tags: ["3.2", "standalone"] + - name: 
"test-4.2-standalone" + tags: ["4.2", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.2" + VERSION: "4.2" TOPOLOGY: "server" - func: "run tests" - - name: "test-3.2-replica_set" - tags: ["3.2", "replica_set"] + - name: "test-4.2-replica_set" + tags: ["4.2", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.2" + VERSION: "4.2" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-3.2-sharded_cluster" - tags: ["3.2", "sharded_cluster"] + - name: "test-4.2-sharded_cluster" + tags: ["4.2", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.2" + VERSION: "4.2" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-3.4-standalone" - tags: ["3.4", "standalone"] + - name: "test-4.4-standalone" + tags: ["4.4", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.4" + VERSION: "4.4" TOPOLOGY: "server" - func: "run tests" - - name: "test-3.4-replica_set" - tags: ["3.4", "replica_set"] + - name: "test-4.4-replica_set" + tags: ["4.4", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.4" + VERSION: "4.4" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-3.4-sharded_cluster" - tags: ["3.4", "sharded_cluster"] + - name: "test-4.4-sharded_cluster" + tags: ["4.4", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.4" + VERSION: "4.4" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-3.6-standalone" - tags: ["3.6", "standalone"] + - name: "test-5.0-standalone" + tags: ["5.0", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.6" + VERSION: "5.0" TOPOLOGY: "server" - func: "run tests" - - name: "test-3.6-replica_set" - tags: ["3.6", "replica_set"] + - name: "test-5.0-replica_set" + tags: ["5.0", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.6" + VERSION: "5.0" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-3.6-sharded_cluster" - tags: ["3.6", "sharded_cluster"] + - name: "test-5.0-sharded_cluster" + tags: ["5.0", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.6" + VERSION: "5.0" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-4.0-standalone" - tags: ["4.0", "standalone"] + - name: "test-6.0-standalone" + tags: ["6.0", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.0" + VERSION: "6.0" TOPOLOGY: "server" - func: "run tests" - - name: "test-4.0-replica_set" - tags: ["4.0", "replica_set"] + - name: "test-6.0-replica_set" + tags: ["6.0", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.0" + VERSION: "6.0" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-4.0-sharded_cluster" - tags: ["4.0", "sharded_cluster"] + - name: "test-6.0-sharded_cluster" + tags: ["6.0", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.0" + VERSION: "6.0" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-4.2-standalone" - tags: ["4.2", "standalone"] + - name: "test-7.0-standalone" + tags: ["7.0", "standalone"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.2" + VERSION: "7.0" TOPOLOGY: "server" - func: "run tests" - - name: "test-4.2-replica_set" - tags: ["4.2", "replica_set"] + - name: "test-7.0-replica_set" + tags: ["7.0", "replica_set"] commands: - func: "bootstrap mongo-orchestration" vars: - 
VERSION: "4.2" + VERSION: "7.0" TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-4.2-sharded_cluster" - tags: ["4.2", "sharded_cluster"] + - name: "test-7.0-sharded_cluster" + tags: ["7.0", "sharded_cluster"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.2" + VERSION: "7.0" TOPOLOGY: "sharded_cluster" - func: "run tests" @@ -760,6 +1448,38 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-rapid-standalone" + tags: ["rapid", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-rapid-replica_set" + tags: ["rapid", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-rapid-sharded_cluster" + tags: ["rapid", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + + - name: "test-serverless" + tags: ["serverless"] + commands: + - func: "run tests" + - name: "test-enterprise-auth" tags: ["enterprise-auth"] commands: @@ -769,6 +1489,16 @@ tasks: TOPOLOGY: "server" - func: "run enterprise auth tests" + - name: "test-search-index-helpers" + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "replica_set" + - func: "run tests" + vars: + TEST_INDEX_MANAGEMENT: "1" + - name: "mod-wsgi-standalone" tags: ["mod_wsgi"] commands: @@ -787,27 +1517,509 @@ tasks: TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" - - name: "cdecimal" - tags: ["cdecimal"] + - name: "mod-wsgi-embedded-mode-standalone" + tags: ["mod_wsgi"] commands: - func: "bootstrap mongo-orchestration" vars: VERSION: "latest" TOPOLOGY: "server" - - func: "run cdecimal tests" + - func: "run mod_wsgi tests" + vars: + MOD_WSGI_EMBEDDED: "1" + + - name: "mod-wsgi-embedded-mode-replica-set" + tags: ["mod_wsgi"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "replica_set" + - func: "run mod_wsgi tests" + vars: + MOD_WSGI_EMBEDDED: "1" - name: "no-server" tags: ["no-server"] commands: - func: "run tests" - vars: - PYTHON_BINARY: /opt/python/2.7/bin/python - name: "atlas-connect" tags: ["atlas-connect"] commands: - func: "run atlas tests" + - name: atlas-data-lake-tests + commands: + - func: "bootstrap data lake" + - func: "run tests" + vars: + TEST_DATA_LAKE: "true" + + - name: "test-aws-lambda-deployed" + commands: + - func: "install dependencies" + - command: ec2.assume_role + params: + role_arn: ${LAMBDA_AWS_ROLE_ARN} + duration_seconds: 3600 + - command: subprocess.exec + params: + working_dir: src + binary: bash + add_expansions_to_env: true + args: + - .evergreen/run-deployed-lambda-aws-tests.sh + env: + TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/test/lambda + + - name: test-ocsp-rsa-valid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-invalid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: 
"rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-soft-fail + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-valid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + 
tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-valid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-invalid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-soft-fail + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" 
+ vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: "aws-auth-test-4.4" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "4.4" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-5.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "5.0" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-6.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "6.0" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-7.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: 
"7.0" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-rapid" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "rapid" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-latest" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "latest" + - func: "get aws auth secrets" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + + - name: "oidc-auth-test-latest" + commands: + - func: "run oidc auth test with aws credentials" + + - name: load-balancer-test + commands: + - func: "bootstrap mongo-orchestration" + vars: + TOPOLOGY: "sharded_cluster" + LOAD_BALANCER: true + - func: "run load-balancer" + - func: "run tests" + + - name: "test-fips-standalone" + tags: ["fips"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - func: "run tests" # }}} - name: "coverage-report" tags: ["coverage"] @@ -831,112 +2043,199 @@ tasks: commands: - func: "download and merge coverage" + - name: "testgcpkms-task" + commands: + - command: shell.exec + type: setup + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + echo "Copying files ... begin" + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + tar czf /tmp/mongo-python-driver.tgz . + GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + echo "Untarring file ... 
end" + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/tox.sh -m test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + + - name: "testgcpkms-fail-task" + # testgcpkms-fail-task runs in a non-GCE environment. + # It is expected to fail to obtain GCE credentials. + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz + SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/tox.sh -m test-eg + + - name: testazurekms-task + commands: + - command: shell.exec + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + source ./secrets-export.sh + cd src + echo "Copying files ... begin" + export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + tar czf /tmp/mongo-python-driver.tgz . + AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" \ + AZUREKMS_DST="~/" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + echo "Untarring file ... end" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + source ./secrets-export.sh + export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/tox.sh -m test-eg" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + + - name: testazurekms-fail-task + commands: + - func: fetch source + - func: make files executable + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + # Get azurekms credentials from the vault. 
+ bash $DRIVERS_TOOLS/.evergreen/auth_aws/setup_secrets.sh drivers/azurekms + source ./secrets-export.sh + cd src + PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ + KEY_NAME="${AZUREKMS_KEYNAME}" \ + KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ + SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ + ./.evergreen/tox.sh -m test-eg axes: # Choice of distro - id: platform display_name: OS values: - - id: awslinux - display_name: "Amazon Linux 2018 (Enterprise)" - run_on: amazon1-2018-test - batchtime: 10080 # 7 days + - id: macos-1014 + display_name: "macOS 10.14" + run_on: macos-1014 variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - - id: archlinux-test - display_name: "Archlinux" - run_on: archlinux-test - batchtime: 10080 # 7 days - - id: debian71 - display_name: "Debian 7.1" - run_on: debian71-test - batchtime: 10080 # 7 days - - id: debian81 - display_name: "Debian 8.1" - run_on: debian81-test - batchtime: 10080 # 7 days - - id: debian92 - display_name: "Debian 9.2" - run_on: debian92-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - - id: macos-1012 - display_name: "macOS 10.12" - run_on: macos-1012 + skip_EC2_auth_test: true + skip_ECS_auth_test: true + skip_web_identity_auth_test: true + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: macos-1100 + display_name: "macOS 11.00" + run_on: macos-1100 variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + skip_web_identity_auth_test: true libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel62 - display_name: "RHEL 6.2 (x86_64)" - run_on: rhel62-small - batchtime: 10080 # 7 days + - id: macos-1100-arm64 + display_name: "macOS 11.00 Arm64" + run_on: macos-1100-arm64 variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-62-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel70 - display_name: "RHEL 7.0" - run_on: rhel70-small + skip_EC2_auth_test: true + skip_ECS_auth_test: true + skip_web_identity_auth_test: true + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: rhel7 + display_name: "RHEL 7.x" + run_on: rhel79-small batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel71-power8-test - display_name: "RHEL 7.1 (POWER8)" - run_on: rhel71-power8-test - batchtime: 10080 # 7 days - - id: rhel72-zseries-test - display_name: "RHEL 7.2 (zSeries)" - run_on: rhel72-zseries-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel72-zseries-test/master/latest/libmongocrypt.tar.gz - - id: suse12-x86-64-test - display_name: "SUSE 12 (x86_64)" - run_on: suse12-test + - id: rhel8 + display_name: "RHEL 8.x" + run_on: rhel87-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/suse12-64/master/latest/libmongocrypt.tar.gz - - id: ubuntu-12.04 - display_name: "Ubuntu 12.04" - run_on: ubuntu1204-test - batchtime: 10080 # 7 days - - id: ubuntu-16.04 - display_name: 
"Ubuntu 16.04" - run_on: ubuntu1604-test + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz + - id: rhel80-fips + display_name: "RHEL 8.0 FIPS" + run_on: rhel80-fips batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz - - id: ubuntu1604-arm64-small - display_name: "Ubuntu 16.04 (ARM64)" - run_on: ubuntu1604-arm64-small + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz + - id: ubuntu-22.04 + display_name: "Ubuntu 22.04" + run_on: ubuntu2204-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604-arm64/master/latest/libmongocrypt.tar.gz - - id: ubuntu1604-power8-test - display_name: "Ubuntu 16.04 (POWER8)" - run_on: ubuntu1604-power8-test + - id: ubuntu-20.04 + display_name: "Ubuntu 20.04" + run_on: ubuntu2004-small batchtime: 10080 # 7 days - - id: ubuntu1804-arm64-test - display_name: "Ubuntu 18.04 (ARM64)" - run_on: ubuntu1804-arm64-test + - id: rhel83-zseries + display_name: "RHEL 8.3 (zSeries)" + run_on: rhel83-zseries-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz - - id: windows-vs2010 - display_name: "Windows 64 Visual Studio 2010" - run_on: windows-64-vs2010-test + - id: rhel81-power8 + display_name: "RHEL 8.1 (POWER8)" + run_on: rhel81-power8-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - - id: windows-vs2015 - display_name: "Windows 64 Visual Studio 2015" - run_on: windows-64-vs2015-test + - id: rhel82-arm64 + display_name: "RHEL 8.2 (ARM64)" + run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - - id: windows-vs2017 - display_name: "Windows 64 Visual Studio 2017" - run_on: windows-64-vs2017-test + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-82-arm64/master/latest/libmongocrypt.tar.gz + - id: windows-64-vsMulti-small + display_name: "Windows 64" + run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: + skip_ECS_auth_test: true + skip_EC2_auth_test: true + skip_web_identity_auth_test: true + venv_bin_dir: "Scripts" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? 
@@ -997,84 +2296,146 @@ axes: variables: COMPRESSORS: "zstd" + # Choice of MongoDB server version + - id: mongodb-version + display_name: "MongoDB" + values: + - id: "3.6" + display_name: "MongoDB 3.6" + variables: + VERSION: "3.6" + - id: "4.0" + display_name: "MongoDB 4.0" + variables: + VERSION: "4.0" + - id: "4.2" + display_name: "MongoDB 4.2" + variables: + VERSION: "4.2" + - id: "4.4" + display_name: "MongoDB 4.4" + variables: + VERSION: "4.4" + - id: "5.0" + display_name: "MongoDB 5.0" + variables: + VERSION: "5.0" + - id: "6.0" + display_name: "MongoDB 6.0" + variables: + VERSION: "6.0" + - id: "7.0" + display_name: "MongoDB 7.0" + variables: + VERSION: "7.0" + - id: "latest" + display_name: "MongoDB latest" + variables: + VERSION: "latest" + - id: "rapid" + display_name: "MongoDB rapid" + variables: + VERSION: "rapid" + # Choice of Python runtime version - id: python-version display_name: "Python" values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "2.7" - display_name: "Python 2.7" + - id: "3.7" + display_name: "Python 3.7" + variables: + PYTHON_BINARY: "/opt/python/3.7/bin/python3" + - id: "3.8" + display_name: "Python 3.8" variables: - PYTHON_BINARY: "/opt/python/2.7/bin/python" - - id: "3.4" - display_name: "Python 3.4" + PYTHON_BINARY: "/opt/python/3.8/bin/python3" + - id: "3.9" + display_name: "Python 3.9" variables: - PYTHON_BINARY: "/opt/python/3.4/bin/python3" - - id: "3.5" - display_name: "Python 3.5" - batchtime: 10080 # 7 days + PYTHON_BINARY: "/opt/python/3.9/bin/python3" + - id: "3.10" + display_name: "Python 3.10" variables: - PYTHON_BINARY: "/opt/python/3.5/bin/python3" - - id: "3.6" - display_name: "Python 3.6" + PYTHON_BINARY: "/opt/python/3.10/bin/python3" + - id: "3.11" + display_name: "Python 3.11" variables: - PYTHON_BINARY: "/opt/python/3.6/bin/python3" + PYTHON_BINARY: "/opt/python/3.11/bin/python3" + - id: "3.12" + display_name: "Python 3.12" + variables: + PYTHON_BINARY: "/opt/python/3.12/bin/python3" + - id: "pypy3.8" + display_name: "PyPy 3.8" + variables: + PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" + - id: "pypy3.10" + display_name: "PyPy 3.10" + variables: + PYTHON_BINARY: "/opt/python/pypy3.10/bin/pypy3" + + - id: python-version-windows + display_name: "Python" + values: - id: "3.7" display_name: "Python 3.7" variables: - PYTHON_BINARY: "/opt/python/3.7/bin/python3" + PYTHON_BINARY: "C:/python/Python37/python.exe" - id: "3.8" display_name: "Python 3.8" variables: - PYTHON_BINARY: "/opt/python/3.8/bin/python3" - - id: "pypy" - display_name: "PyPy" + PYTHON_BINARY: "C:/python/Python38/python.exe" + - id: "3.9" + display_name: "Python 3.9" variables: - PYTHON_BINARY: "/opt/python/pypy/bin/pypy" - - id: "pypy3.5" - display_name: "PyPy 3.5" + PYTHON_BINARY: "C:/python/Python39/python.exe" + - id: "3.10" + display_name: "Python 3.10" variables: - PYTHON_BINARY: "/opt/python/pypy3.5/bin/pypy3" - - id: "pypy3.6" - display_name: "PyPy 3.6" + PYTHON_BINARY: "C:/python/Python310/python.exe" + - id: "3.11" + display_name: "Python 3.11" variables: - PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" - - id: "jython2.7" - display_name: "Jython 2.7" - batchtime: 10080 # 7 days + PYTHON_BINARY: "C:/python/Python311/python.exe" + - id: "3.12" + display_name: "Python 3.12" + variables: + PYTHON_BINARY: "C:/python/Python312/python.exe" + + - id: python-version-windows-32 + display_name: "Python" + values: + - id: "3.7" + display_name: "32-bit Python 3.7" variables: - PYTHON_BINARY: 
"/opt/python/jython2.7/bin/jython" - # Windows - - id: "win-vs2010-3.4" - display_name: "Python 3.4" + PYTHON_BINARY: "C:/python/32/Python37/python.exe" + - id: "3.8" + display_name: "32-bit Python 3.8" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python34/python.exe" - - id: "win-vs2015-2.7" - display_name: "Python 2.7" + PYTHON_BINARY: "C:/python/32/Python38/python.exe" + - id: "3.9" + display_name: "32-bit Python 3.9" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python27/python.exe" - - id: "win-vs2015-3.5" - display_name: "Python 3.5" + PYTHON_BINARY: "C:/python/32/Python39/python.exe" + - id: "3.10" + display_name: "32-bit Python 3.10" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python35/python.exe" - - id: "win-vs2015-3.6" - display_name: "Python 3.6" + PYTHON_BINARY: "C:/python/32/Python310/python.exe" + - id: "3.11" + display_name: "32-bit Python 3.11" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python36/python.exe" - - id: "win-vs2015-3.7" - display_name: "Python 3.7" + PYTHON_BINARY: "C:/python/32/Python311/python.exe" + - id: "3.12" + display_name: "32-bit Python 3.12" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python37/python.exe" + PYTHON_BINARY: "C:/python/32/Python312/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version display_name: "mod_wsgi version" values: - - id: "3" - display_name: "mod_wsgi 3.5" - variables: - MOD_WSGI_VERSION: "3" - id: "4" display_name: "mod_wsgi 4.x" variables: @@ -1114,10 +2475,6 @@ axes: display_name: MMAPv1 variables: STORAGE_ENGINE: "mmapv1" - - id: wiredtiger - display_name: WiredTiger - variables: - STORAGE_ENGINE: "wiredtiger" - id: inmemory display_name: InMemory variables: @@ -1151,299 +2508,273 @@ axes: tags: ["encryption_tag"] variables: test_encryption: true + batchtime: 10080 # 7 days + - id: "encryption_pyopenssl" + display_name: "Encryption PyOpenSSL" + tags: ["encryption_tag"] + variables: + test_encryption: true + test_encryption_pyopenssl: true + batchtime: 10080 # 7 days + # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion. + - id: "encryption_crypt_shared" + display_name: "Encryption shared lib" + tags: ["encryption_tag"] + variables: + test_encryption: true + test_crypt_shared: true + batchtime: 10080 # 7 days - # Run setdefaultencoding before running the test suite? - - id: setdefaultencoding - display_name: "setdefaultencoding" + # Run pyopenssl tests? + - id: pyopenssl + display_name: "PyOpenSSL" values: - - id: "setdefaultencoding" - display_name: "setdefaultencoding" - tags: ["setdefaultencoding_tag"] + - id: "enabled" + display_name: "PyOpenSSL" variables: - SETDEFAULTENCODING: "cp1251" - -buildvariants: -- matrix_name: "tests-all" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 with SSL. - - awslinux - - rhel70 - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: &all-server-versions - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + test_pyopenssl: true + batchtime: 10080 # 7 days -- matrix_name: "tests-all-encryption" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 with SSL. - - awslinux - - rhel70 - auth-ssl: "*" - encryption: "*" - display_name: "Encryption ${platform} ${auth-ssl}" - tasks: &encryption-server-versions - - ".4.2" - - ".4.0" - - ".2.6" + - id: versionedApi + display_name: "versionedApi" + values: + # Test against a cluster with requireApiVersion=1. 
+ - id: "requireApiVersion1" + display_name: "requireApiVersion1" + tags: [ "versionedApi_tag" ] + variables: + # REQUIRE_API_VERSION is set to make drivers-evergreen-tools + # start a cluster with the requireApiVersion parameter. + REQUIRE_API_VERSION: "1" + # MONGODB_API_VERSION is the apiVersion to use in the test suite. + MONGODB_API_VERSION: "1" + # Test against a cluster with acceptApiVersion2 but without + # requireApiVersion, and don't automatically add apiVersion to + # clients created in the test suite. + - id: "acceptApiVersion2" + display_name: "acceptApiVersion2" + tags: [ "versionedApi_tag" ] + variables: + ORCHESTRATION_FILE: "versioned-api-testing.json" -- matrix_name: "tests-no-36-plus" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 and <3.6 with SSL. - - ubuntu-12.04 - auth-ssl: "*" - # Ubuntu 12 ships Python 2.7.3. We want to test that version with - # and without C extensions - c-extensions: "*" - display_name: "${platform} ${auth-ssl} ${c-extensions}" - tasks: - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + # Run load balancer tests? + - id: loadbalancer + display_name: "Load Balancer" + values: + - id: "enabled" + display_name: "Load Balancer" + variables: + test_loadbalancer: true + batchtime: 10080 # 7 days -- matrix_name: "tests-no-40-plus" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 and <4.0 with SSL. - - debian71 - auth-ssl: "*" - # Debian 7 ships Python 2.7.3. We want to test that version with - # and without C extensions - c-extensions: "*" - display_name: "${platform} ${auth-ssl} ${c-extensions}" - tasks: - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + - id: serverless + display_name: "Serverless" + values: + - id: "enabled" + display_name: "Serverless" + variables: + test_serverless: true + batchtime: 10080 # 7 days -- matrix_name: "tests-archlinux" +buildvariants: +- matrix_name: "tests-fips" matrix_spec: platform: - # Archlinux supports MongoDB without SSL. - # MongoDB 4.2 drops support for archlinux (generic linux builds). - - archlinux-test - auth: "*" - ssl: "nossl" + - rhel80-fips + auth: "auth" + ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" tasks: - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - -- matrix_name: "tests-os-requires-32" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.2 with SSL. - - ubuntu-16.04 - - suse12-x86-64-test - - rhel71-power8-test - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" + - "test-fips-standalone" - matrix_name: "test-macos" matrix_spec: platform: # MacOS introduced SSL support with MongoDB >= 3.2. # Older server versions (2.6, 3.0) are supported without SSL. - - macos-1012 + - macos-1014 auth: "*" ssl: "*" exclude_spec: # No point testing with SSL without auth. 
- - platform: macos-1012 + - platform: macos-1014 auth: "noauth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".7.0" + - ".6.0" + - ".5.0" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" - - ".3.4" - - ".3.2" - rules: - - if: - platform: macos-1012 - auth: "*" - ssl: "nossl" - then: - add_tasks: - - ".3.0" - - ".2.6" + +- matrix_name: "test-macos-arm64" + matrix_spec: + platform: + - macos-1100-arm64 + auth-ssl: "*" + display_name: "${platform} ${auth-ssl}" + tasks: + - ".latest" + - ".7.0" + - ".6.0" + - ".5.0" + - ".4.4" - matrix_name: "test-macos-encryption" matrix_spec: platform: - - macos-1012 + - macos-1100 auth: "auth" ssl: "nossl" encryption: "*" - display_name: "Encryption ${platform} ${auth} ${ssl}" - tasks: *encryption-server-versions - -- matrix_name: "test-os-requires-34-no-42plus" + display_name: "${encryption} ${platform} ${auth} ${ssl}" + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: macos-1100 + auth: "auth" + ssl: "nossl" + then: + add_tasks: &encryption-server-versions + - ".rapid" + - ".latest" + - ".7.0" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" + +# Test one server version with zSeries, POWER8, and ARM. +- matrix_name: "test-different-cpu-architectures" matrix_spec: platform: - # OSes that support versions of MongoDB>=3.4 <4.2 with SSL. - - debian81 - - ubuntu1604-power8-test - - ubuntu1604-arm64-small + - rhel83-zseries # Added in 5.0.8 (SERVER-44074) + - rhel81-power8 # Added in 4.2.7 (SERVER-44072) + - rhel82-arm64 # Added in 4.4.2 (SERVER-48282) auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: - - ".4.0" - - ".3.6" - - ".3.4" + - ".6.0" -- matrix_name: "test-os-requires-34" +- matrix_name: "tests-python-version-rhel8-test-ssl" matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.4 with SSL. - - rhel72-zseries-test + platform: rhel8 + python-version: "*" auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: + coverage: "*" + display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" + tasks: &all-server-versions + - ".rapid" - ".latest" + - ".7.0" + - ".6.0" + - ".5.0" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" - - ".3.4" -- matrix_name: "test-os-requires-42" +- matrix_name: "tests-pyopenssl" matrix_spec: - platform: - # OSes that support versions of MongoDB>=4.2 with SSL. - - ubuntu1804-arm64-test - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" + platform: rhel8 + python-version: "*" + auth: "*" + ssl: "ssl" + pyopenssl: "*" + # Only test "noauth" with Python 3.7. + exclude_spec: + platform: rhel8 + python-version: ["3.8", "3.9", "3.10", "pypy3.8", "pypy3.10"] + auth: "noauth" + ssl: "ssl" + pyopenssl: "*" + display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - - ".latest" - - ".4.2" - variables: - set_xtrace_on: on + - '.replica_set' + # Test standalone and sharded only on 7.0. + - '.7.0' -- matrix_name: "tests-python-version-rhel62-test-ssl" +- matrix_name: "tests-pyopenssl-macOS" matrix_spec: - platform: rhel62 - # RHEL 6.2 does not support Python 3.7.x and later. 
- python-version: &rhel62-pythons ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] - auth: "*" - ssl: "*" - coverage: "*" - exclude_spec: - - platform: rhel62 - python-version: "*" - auth: "noauth" - ssl: "ssl" - coverage: "*" - - platform: rhel62 - python-version: "!jython2.7" # Test Jython with Auth/NoSSL - auth: "auth" - ssl: "nossl" - coverage: "*" - - platform: rhel62 - # PYTHON-498: disable Jython SSL tests - python-version: "jython2.7" - # EVG-1410: exlcude_spec must specifiy values for all axes - auth: "*" - ssl: "ssl" - coverage: "*" - display_name: "${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: *all-server-versions + platform: macos-1014 + auth: "auth" + ssl: "ssl" + pyopenssl: "*" + display_name: "PyOpenSSL ${platform} ${auth}" + tasks: + - '.replica_set' + +- matrix_name: "tests-pyopenssl-windows" + matrix_spec: + platform: windows-64-vsMulti-small + python-version-windows: "*" + auth: "auth" + ssl: "ssl" + pyopenssl: "*" + display_name: "PyOpenSSL ${platform} ${python-version-windows} ${auth}" + tasks: + - '.replica_set' -- matrix_name: "tests-python-version-rhel62-test-encryption" +- matrix_name: "tests-python-version-rhel8-test-encryption" matrix_spec: - platform: rhel62 - # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6"] + platform: rhel8 + python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config # coverage: "*" encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions + display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: rhel8 + auth-ssl: noauth-nossl + python-version: "*" + then: + add_tasks: *encryption-server-versions -- matrix_name: "tests-python-version-rhel62-without-c-extensions" +- matrix_name: "tests-python-version-rhel8-without-c-extensions" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: rhel8 + python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. 
- - platform: rhel62 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + - platform: rhel8 + python-version: ["pypy3.8", "pypy3.10"] c-extensions: "*" auth-ssl: "*" coverage: "*" display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu1604-without-c-extensions" - matrix_spec: - platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7", "3.8"] - c-extensions: without-c-extensions - auth-ssl: noauth-nossl - display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - -- matrix_name: "tests-python-version-ubuntu16-compression" +- matrix_name: "tests-python-version-rhel8-compression" matrix_spec: - # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 - platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] + platform: rhel8 + python-version: "*" c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-16.04 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + - platform: rhel8 + python-version: ["pypy3.8", "pypy3.10"] c-extensions: "with-c-extensions" compression: "*" - # Jython doesn't support some compression types. - - platform: ubuntu-16.04 - python-version: ["jython2.7"] - c-extensions: "*" - compression: ["snappy", "zstd"] - # Some tests fail with CPython 3.8 and python-snappy - - platform: ubuntu-16.04 - python-version: ["3.8"] - c-extensions: "*" - compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" + - "test-5.0-standalone" + - "test-4.4-standalone" - "test-4.2-standalone" rules: # Server versions 3.6 and 4.0 support snappy and zlib. @@ -1456,138 +2787,89 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-rhel62" +- matrix_name: "tests-python-version-green-framework-rhel8" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: rhel8 + python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: rhel62 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + - platform: rhel8 + python-version: ["pypy3.8", "pypy3.10"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -# Test CPython 3.4 against MongoDB 2.6-4.2 on Windows with Visual Studio 2010. 
-- matrix_name: "tests-windows-vs2010-python-version" - matrix_spec: - platform: windows-vs2010 - python-version: &win-vs2010-pythons ["win-vs2010-3.4"] - auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - -# windows-vs2010 3.4 is unable to dlopen the libmongocrypt ddl built on 2016 -#- matrix_name: "tests-windows-vs2010-python-version-encryption" -# matrix_spec: -# platform: windows-vs2010 -# python-version: *win-vs2010-pythons -# auth-ssl: "*" -# encryption: "*" -# display_name: "Encryption ${platform} ${python-version} ${auth-ssl}" -# tasks: *encryption-server-versions - -- matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" +- matrix_name: "tests-windows-python-version" matrix_spec: - platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7", "3.8"] + platform: windows-64-vsMulti-small + python-version-windows: "*" auth-ssl: "*" - display_name: "${python-version} OpenSSL 1.0.2 ${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - -- matrix_name: "tests-python-version-requires-openssl-102-plus-test-encryption" - matrix_spec: - platform: ubuntu-16.04 - python-version: *openssl-102-plus-pythons - auth-ssl: "noauth-nossl" - encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions + display_name: "${platform} ${python-version-windows} ${auth-ssl}" + tasks: *all-server-versions -- matrix_name: "tests-python-version-supports-openssl-110-test-ssl" +- matrix_name: "tests-windows-python-version-32-bit" matrix_spec: - platform: debian92 - python-version: *openssl-102-plus-pythons + platform: windows-64-vsMulti-small + python-version-windows-32: "*" auth-ssl: "*" - display_name: "${python-version} OpenSSL 1.1.0 ${platform} ${auth-ssl}" - tasks: - - ".latest" + display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" + tasks: *all-server-versions -# Test CPython 2.7, 3.5 and 3.6 against MongoDB 2.6-4.2 -# on Windows with the Microsoft Visual C++ Compiler for Python 2.7 or Visual Studio 2015. -- matrix_name: "tests-windows-vs2015-python-version-27plus" +- matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: windows-vs2015 - python-version: &win-vs2015-pythons ["win-vs2015-2.7", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7"] + platform: rhel7 + # Python 3.10+ requires OpenSSL 1.1.1+ + python-version: ["3.7", "3.8", "3.9", "pypy3.8", "pypy3.10"] auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" + display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + - ".5.0" -- matrix_name: "tests-windows-vs2015-python-version-encryption" +- matrix_name: "tests-windows-encryption" matrix_spec: - platform: windows-vs2015 - python-version: *win-vs2015-pythons + platform: windows-64-vsMulti-small + python-version-windows: "*" auth-ssl: "*" encryption: "*" - display_name: "Encryption ${platform} ${python-version} ${auth-ssl}" - tasks: *encryption-server-versions - -# Test CPython 3.7 against MongoDB >= 4.3 on Windows 2017+. 
-- matrix_name: "tests-windows-vs2017" - matrix_spec: - platform: windows-vs2017 - python-version: ["win-vs2015-3.7"] - auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: - - .latest + display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: windows-64-vsMulti-small + python-version-windows: "*" + auth-ssl: "*" + then: + add_tasks: *encryption-server-versions -# Storage engine tests on RHEL 6.2 (x86_64) with Python 2.7. +# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: - platform: rhel62 + platform: rhel8 storage-engine: "*" - python-version: 2.7 + python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: rhel62 + platform: rhel8 storage-engine: ["inmemory"] python-version: "*" then: add_tasks: - "test-latest-standalone" + - "test-7.0-standalone" + - "test-6.0-standalone" + - "test-5.0-standalone" + - "test-4.4-standalone" - "test-4.2-standalone" - "test-4.0-standalone" - "test-3.6-standalone" - - "test-3.4-standalone" - - "test-3.2-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: rhel62 + platform: rhel8 storage-engine: ["mmapv1"] python-version: "*" then: @@ -1596,141 +2878,255 @@ buildvariants: - "test-4.0-replica_set" - "test-3.6-standalone" - "test-3.6-replica_set" - - "test-3.4-standalone" - - "test-3.2-standalone" - - if: - # No need to test this on later server versions as it becomes the default - platform: rhel62 - storage-engine: ["wiredtiger"] - python-version: "*" - then: - add_tasks: - - "test-3.0-standalone" -# enableTestCommands=0 tests on RHEL 6.2 (x86_64) with Python 2.7. +# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: rhel62 + platform: rhel8 disableTestCommands: "*" - python-version: "2.7" + python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" -# setdefaultencoding tests on RHEL 6.2 (x86_64) with Python 2.7. 
-- matrix_name: "test-setdefaultencoding" - matrix_spec: - platform: rhel62 - setdefaultencoding: "*" - python-version: "2.7" - display_name: "setdefaultencoding ${python-version} ${platform}" - tasks: - - "test-latest-standalone" - - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: rhel8 + python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" tasks: - name: "test-enterprise-auth" -- matrix_name: "tests-windows-vs2010-enterprise-auth" +- matrix_name: "tests-windows-enterprise-auth" matrix_spec: - platform: windows-vs2010 - python-version: *win-vs2010-pythons + platform: windows-64-vsMulti-small + python-version-windows: "*" auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" + display_name: "Enterprise ${auth} ${platform} ${python-version-windows}" tasks: - name: "test-enterprise-auth" -- matrix_name: "tests-windows-vs2015-enterprise-auth" +- matrix_name: "test-search-index-helpers" matrix_spec: - platform: windows-vs2015 - python-version: *win-vs2015-pythons - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" + platform: rhel8 + python-version: "3.8" + display_name: "Search Index Helpers ${platform}" tasks: - - name: "test-enterprise-auth" + - name: "test_atlas_task_group_search_indexes" - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: rhel62 - python-version: ["2.7", "3.4", "3.6"] + platform: ubuntu-22.04 + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: - name: "mod-wsgi-standalone" - name: "mod-wsgi-replica-set" + - name: "mod-wsgi-embedded-mode-standalone" + - name: "mod-wsgi-embedded-mode-replica-set" - matrix_name: "mockupdb-tests" matrix_spec: - platform: rhel62 - python-version: 2.7 + platform: rhel8 + python-version: 3.7 display_name: "MockupDB Tests" tasks: - name: "mockupdb" - matrix_name: "tests-doctests" matrix_spec: - platform: rhel62 - python-version: ["2.7", "3.4"] + platform: rhel8 + python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" -- matrix_name: "cdecimal" - matrix_spec: - platform: rhel62 - python-version: 2.7 - display_name: "cdecimal ${python-version} ${platform}" - tasks: - - name: "cdecimal" - - name: "no-server" display_name: "No server test" run_on: - - rhel62-small + - rhel84-small tasks: - name: "no-server" - expansions: - set_xtrace_on: on - name: "Coverage Report" display_name: "Coverage Report" run_on: - - ubuntu1604-test + - rhel84-small tasks: - name: "coverage-report" - expansions: - set_xtrace_on: on - matrix_name: "atlas-connect" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: rhel8 + python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: - name: "atlas-connect" -- matrix_name: "atlas-connect-openssl-102-plus" +- matrix_name: "serverless" matrix_spec: - platform: debian92 - python-version: *openssl-102-plus-pythons - display_name: "Atlas connect ${python-version} ${platform}" + platform: rhel8 + python-version: "*" + auth-ssl: auth-ssl + serverless: "*" + display_name: "Serverless ${python-version} ${platform}" tasks: - - name: "atlas-connect" + - "serverless_task_group" + +- matrix_name: "data-lake-spec-tests" + matrix_spec: + platform: rhel8 + python-version: ["3.7", "3.10"] + auth: "auth" + c-extensions: "*" + display_name: "Atlas Data Lake 
${python-version} ${c-extensions}" + tasks: + - name: atlas-data-lake-tests + +- matrix_name: "stable-api-tests" + matrix_spec: + platform: rhel8 + python-version: ["3.7", "3.10"] + auth: "auth" + versionedApi: "*" + display_name: "Versioned API ${versionedApi} ${python-version}" + batchtime: 10080 # 7 days + tasks: + # Versioned API was introduced in MongoDB 4.7 + - "test-latest-standalone" + - "test-5.0-standalone" + +- matrix_name: "ocsp-test" + matrix_spec: + platform: rhel8 + python-version: ["3.7", "3.10", "pypy3.8", "pypy3.10"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + - name: ".ocsp" + +- matrix_name: "ocsp-test-windows" + matrix_spec: + platform: windows-64-vsMulti-small + python-version-windows: ["3.7", "3.10"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + # Windows MongoDB servers do not staple OCSP responses and only support RSA. + - name: ".ocsp-rsa !.ocsp-staple" + +- matrix_name: "ocsp-test-macos" + matrix_spec: + platform: macos-1014 + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + # macOS MongoDB servers do not staple OCSP responses and only support RSA. + - name: ".ocsp-rsa !.ocsp-staple" + +- matrix_name: "oidc-auth-test" + matrix_spec: + platform: [ rhel8, macos-1100, windows-64-vsMulti-small ] + display_name: "MONGODB-OIDC Auth ${platform}" + tasks: + - name: "oidc-auth-test-latest" + +- matrix_name: "aws-auth-test" + matrix_spec: + platform: [ubuntu-20.04] + python-version: ["3.9"] + display_name: "MONGODB-AWS Auth ${platform} ${python-version}" + tasks: + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" + - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" + +- matrix_name: "aws-auth-test-mac" + matrix_spec: + platform: [macos-1014] + display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" + tasks: + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" + - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" + +- matrix_name: "aws-auth-test-windows" + matrix_spec: + platform: [windows-64-vsMulti-small] + python-version-windows: "*" + display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" + tasks: + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" + - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" + +- matrix_name: "load-balancer" + matrix_spec: + platform: rhel8 + mongodb-version: ["6.0", "7.0", "rapid", "latest"] + auth-ssl: "*" + python-version: "*" + loadbalancer: "*" + display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" + tasks: + - name: "load-balancer-test" + +- name: testgcpkms-variant + display_name: "GCP KMS" + run_on: + - debian10-small + tasks: + - name: testgcpkms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testgcpkms-fail-task + +- name: testazurekms-variant + display_name: "Azure KMS" + run_on: rhel87-small + tasks: + - name: 
testazurekms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testazurekms-fail-task + +- name: rhel8-test-lambda + display_name: AWS Lambda handler tests + run_on: rhel87-small + tasks: + - name: test_aws_lambda_task_group + +- name: Release + display_name: Release + batchtime: 20160 # 14 days + tags: ["release_tag"] + tasks: + - ".release_tag" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available - # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ - # Ubuntu16.04 aarch64 is only supported by MongoDB 3.4+ - # Ubuntu16.04 s390x is only supported by MongoDB 3.4+ - # Ubuntu16.04 (x86) only supports MongoDB 3.2+ # Debian 8.1 only supports MongoDB 3.4+ # SUSE12 s390x is only supported by MongoDB 3.4+ # No enterprise build for Archlinux, SSL not available + # RHEL 7.6 and RHEL 8.4 only support 3.6+. # RHEL 7 only supports 2.6+ # RHEL 7.1 ppc64le is only supported by MongoDB 3.2+ # RHEL 7.2 s390x is only supported by MongoDB 3.4+ diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index f28a957746..9f4bcdbb59 100644 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index a077b8c067..43b21a65fb 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -96,7 +96,7 @@ functions: # If this was a patch build, doing a fresh clone would not actually test the patch cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS else - git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config @@ -105,7 +105,7 @@ functions: params: script: | ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -116,7 +116,7 @@ functions: params: script: | ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run perf tests": - command: shell.exec @@ -125,7 +125,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh + PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh "attach benchmark test results": - command: attach.results params: file_location: src/report.json "send dashboard data": - - command: json.send + - command: perf.send params: - name: perf file: src/results.json "cleanup": @@ -183,7 +182,7 @@ functions: ${PREPARE_SHELL} file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion.
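      # (A minimal illustration of the distinction: Evergreen substitutes ${...} expansions in shell.exec scripts before bash ever runs, and an undefined expansion such as ${file} becomes the empty string, whereas $file is left for bash to resolve as a shell variable; hence [ -f "$file" ] works where [ -f "${file}" ] would test an empty path.)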
- [ -f "$file" ] && sh $file || echo "$file not available, skipping" + [ -f "$file" ] && bash $file || echo "$file not available, skipping" pre: - func: "fetch source" @@ -200,34 +199,34 @@ post: - func: "cleanup" tasks: - - name: "perf-3.0-standalone" + - name: "perf-4.0-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.0" + VERSION: "4.0" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-3.4-standalone" + - name: "perf-4.4-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.4" + VERSION: "4.4" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-3.6-standalone" + - name: "perf-6.0-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "3.6" + VERSION: "6.0" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" @@ -238,9 +237,8 @@ buildvariants: - name: "perf-tests" display_name: "Performance Benchmark Tests" batchtime: 10080 # 7 days - run_on: centos6-perf + run_on: ubuntu2004-large tasks: - - name: "perf-3.0-standalone" - - name: "perf-3.4-standalone" - - name: "perf-3.6-standalone" - + - name: "perf-4.0-standalone" + - name: "perf-4.4-standalone" + - name: "perf-6.0-standalone" diff --git a/.evergreen/release.sh b/.evergreen/release.sh new file mode 100755 index 0000000000..1fdd459ad9 --- /dev/null +++ b/.evergreen/release.sh @@ -0,0 +1,9 @@ +#!/bin/bash -ex + +if [ "$(uname -s)" = "Darwin" ]; then + .evergreen/build-mac.sh +elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + .evergreen/build-windows.sh +else + .evergreen/build-manylinux.sh +fi diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh new file mode 100755 index 0000000000..f9ac89e947 --- /dev/null +++ b/.evergreen/resync-specs.sh @@ -0,0 +1,178 @@ +#!/bin/bash +# exit when any command fails +set -e +PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)") +SPECS=${MDB_SPECS:-~/Work/specifications} + +help (){ + echo "Usage: resync_specs.sh [-bcsp] spec" + echo "Required arguments:" + echo " spec determines which folder the spec tests will be copied from." + echo "Optional flags:" + echo " -b is used to add a string to the blocklist for that next run. Can be used" + echo " any number of times on a single command to block multiple patterns." + echo " You can use any regex pattern (it is passed to 'grep -Ev')." + echo " -c is used to set a branch or commit that will be checked out in the" + echo " specifications repo before copying." + echo " -s is used to set a unique path to the specs repo for that specific" + echo " run." + echo "Notes:" + echo "You can export the environment variable MDB_SPECS to set the specs" + echo " repo similar to -s, but this will persist between runs until you " + echo "unset it." +} + +# Parse flag args +BRANCH='' +BLOCKLIST='.*\.yml' +while getopts 'b:c:s:' flag; do + case "${flag}" in + b) BLOCKLIST+="|$OPTARG" + ;; + c) BRANCH="${OPTARG}" + ;; + s) SPECS="${OPTARG}" + ;; + *) help; exit 0 + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z $BRANCH ] +then + git -C $SPECS checkout $BRANCH +fi + +# Ensure the JSON files are up to date. +cd $SPECS/source +make +cd - +# cpjson unified-test-format/tests/invalid unified-test-format/invalid +# * param1: Path to spec tests dir in specifications repo +# * param2: Path to where the corresponding tests live in Python. 
+cpjson () { + find "$PYMONGO"/test/$2 -type f -delete + cd "$SPECS"/source/$1 + find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ + $PYMONGO/test/$2 + printf "\nIgnored files for ${PWD}:\n" + IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ + <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ + sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" + printf "%s\n" $IGNORED_FILES + cd "$PYMONGO"/test/$2 + printf "%s\n" $IGNORED_FILES | xargs git checkout master + +} + +for spec in "$@" +do + # Match the spec dir name, the python test dir name, and/or common abbreviations. + case "$spec" in + auth) + cpjson auth/tests/ auth + ;; + atlas-data-lake-testing|data_lake) + cpjson atlas-data-lake-testing/tests/ data_lake + ;; + bson-corpus|bson_corpus) + cpjson bson-corpus/tests/ bson_corpus + ;; + max-staleness|max_staleness) + cpjson max-staleness/tests/ max_staleness + ;; + collection-management|collection_management) + cpjson collection-management/tests/ collection_management + ;; + connection-string|connection_string) + cpjson connection-string/tests/ connection_string/test + ;; + change-streams|change_streams) + cpjson change-streams/tests/ change_streams/ + ;; + client-side-encryption|csfle|fle) + cpjson client-side-encryption/tests/ client-side-encryption/spec + cpjson client-side-encryption/corpus/ client-side-encryption/corpus + cpjson client-side-encryption/external/ client-side-encryption/external + cpjson client-side-encryption/limits/ client-side-encryption/limits + cpjson client-side-encryption/etc/data client-side-encryption/etc/data + ;; + cmap|CMAP|connection-monitoring-and-pooling) + cpjson connection-monitoring-and-pooling/tests cmap + rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 + ;; + apm|APM|command-monitoring|command_monitoring) + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring + ;; + crud|CRUD) + cpjson crud/tests/ crud + ;; + csot|CSOT|client-side-operations-timeout) + cpjson client-side-operations-timeout/tests csot + ;; + gridfs) + cpjson gridfs/tests gridfs + ;; + index|index-management) + cpjson index-management/tests index_management + ;; + load-balancers|load_balancer) + cpjson load-balancers/tests load_balancer + ;; + srv|SRV|initial-dns-seedlist-discovery|srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist + ;; + retryable-reads|retryable_reads) + cpjson retryable-reads/tests/ retryable_reads + ;; + retryable-writes|retryable_writes) + cpjson retryable-writes/tests/ retryable_writes + ;; + run-command|run_command) + cpjson run-command/tests/ run_command + ;; + sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) + cpjson server-discovery-and-monitoring/tests/errors \ + discovery_and_monitoring/errors + cpjson server-discovery-and-monitoring/tests/rs \ + discovery_and_monitoring/rs + cpjson server-discovery-and-monitoring/tests/sharded \ + discovery_and_monitoring/sharded + cpjson server-discovery-and-monitoring/tests/single \ + discovery_and_monitoring/single + cpjson server-discovery-and-monitoring/tests/unified \ + discovery_and_monitoring/unified + cpjson server-discovery-and-monitoring/tests/load-balanced \ + discovery_and_monitoring/load-balanced + ;; + sdam-monitoring|sdam_monitoring) + cpjson server-discovery-and-monitoring/tests/monitoring sdam_monitoring + ;; + server-selection|server_selection) + cpjson server-selection/tests/ server_selection + ;; + sessions) + cpjson sessions/tests/ sessions + ;; + 
transactions|transactions-convenient-api) + cpjson transactions/tests/ transactions + cpjson transactions-convenient-api/tests/ transactions-convenient-api + rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 + ;; + unified|unified-test-format) + cpjson unified-test-format/tests/ unified-test-format/ + ;; + uri|uri-options|uri_options) + cpjson uri-options/tests uri_options + cp "$SPECS"/source/uri-options/tests/*.pem $PYMONGO/test/uri_options + ;; + stable-api|versioned-api) + cpjson versioned-api/tests versioned-api + ;; + *) + echo "Do not know how to resync spec tests for '${spec}'" + help + ;; + esac +done diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh deleted file mode 100644 index c06d3b45b5..0000000000 --- a/.evergreen/run-atlas-tests.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Don't trace to avoid secrets showing up in the logs -set -o errexit - -export JAVA_HOME=/opt/java/jdk8 - -IMPL=$(${PYTHON_BINARY} -c "import platform, sys; sys.stdout.write(platform.python_implementation())") -if [ $IMPL = "Jython" -o $IMPL = "PyPy" ]; then - $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest - . atlastest/bin/activate - trap "deactivate; rm -rf atlastest" EXIT HUP - pip install certifi - PYTHON=python -else - PYTHON=$PYTHON_BINARY -fi - -echo "Running tests" -$PYTHON test/atlas/test_connection.py diff --git a/.evergreen/run-cdecimal-tests.sh b/.evergreen/run-cdecimal-tests.sh deleted file mode 100644 index 1a341a5025..0000000000 --- a/.evergreen/run-cdecimal-tests.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -virtualenv -p ${PYTHON_BINARY} cdecimaltest -trap "deactivate; rm -rf cdecimaltest" EXIT HUP -. cdecimaltest/bin/activate -# No cdecimal tarballs on pypi. -pip install http://www.bytereef.org/software/mpdecimal/releases/cdecimal-2.3.tar.gz -python -c 'import sys; print(sys.version)' -python cdecimal_test.py diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh new file mode 100644 index 0000000000..aa16d62650 --- /dev/null +++ b/.evergreen/run-deployed-lambda-aws-tests.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +export PATH="/opt/python/3.9/bin:${PATH}" +python --version +pushd ./test/lambda + +. build.sh +popd +. ${DRIVERS_TOOLS}/.evergreen/aws_lambda/run-deployed-lambda-aws-tests.sh diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh deleted file mode 100644 index eebb0f784c..0000000000 --- a/.evergreen/run-doctests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} setup.py doc -t diff --git a/.evergreen/run-enterprise-auth-tests.sh b/.evergreen/run-enterprise-auth-tests.sh deleted file mode 100644 index cc2c8dce0c..0000000000 --- a/.evergreen/run-enterprise-auth-tests.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# Don't trace to avoid secrets showing up in the logs -set -o errexit - -echo "Running enterprise authentication tests" - -export JAVA_HOME=/opt/java/jdk8 - -PLATFORM="$(${PYTHON_BINARY} -c 'import platform, sys; sys.stdout.write(platform.system())')" - -export DB_USER="bob" -export DB_PASSWORD="pwd123" -EXTRA_ARGS="" - -# There is no kerberos package for Jython, but we do want to test PLAIN. 
-if [ ${PLATFORM} != "Java" ]; then - if [ "Windows_NT" = "$OS" ]; then - echo "Setting GSSAPI_PASS" - export GSSAPI_PASS=${SASL_PASS} - else - # BUILD-3830 - touch ${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - - echo "Writing keytab" - echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab - echo "Running kinit" - kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} - fi - echo "Setting GSSAPI variables" - export GSSAPI_HOST=${SASL_HOST} - export GSSAPI_PORT=${SASL_PORT} - export GSSAPI_PRINCIPAL=${PRINCIPAL} -else - EXTRA_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" -fi - - -echo "Running tests" -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} $EXTRA_ARGS setup.py test --xunit-output=xunit-results diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh deleted file mode 100644 index 93e434c50f..0000000000 --- a/.evergreen/run-mockupdb-tests.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -${PYTHON_BINARY} setup.py clean -cd .. -${PYTHON_BINARY} -m virtualenv mockuptests -. mockuptests/bin/activate -trap "deactivate" EXIT HUP - -# Install PyMongo from git clone so mockup-tests don't -# download it from pypi. -pip install ${PROJECT_DIRECTORY} - -git clone https://github.com/ajdavis/pymongo-mockup-tests.git -cd pymongo-mockup-tests -python setup.py test diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index 5e8b7ca2ac..afb3f271ae 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -1,10 +1,10 @@ -#!/bin/sh +#!/bin/bash set -o xtrace set -o errexit APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true if [ -n "$APACHE" ]; then - APACHE_CONFIG=apache22ubuntu1204.conf + APACHE_CONFIG=apache24ubuntu161404.conf else APACHE=$(command -v httpd) || true if [ -z "$APACHE" ]; then @@ -18,25 +18,30 @@ fi PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") +# Ensure the C extensions are installed. +${PYTHON_BINARY} setup.py build_ext -i + export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so export PYTHONHOME=/opt/python/$PYTHON_VERSION +# If MOD_WSGI_EMBEDDED is set use the default embedded mode behavior instead +# of daemon mode (WSGIDaemonProcess). +if [ -n "$MOD_WSGI_EMBEDDED" ]; then + export MOD_WSGI_CONF=mod_wsgi_test_embedded.conf +else + export MOD_WSGI_CONF=mod_wsgi_test.conf +fi cd .. $APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG} -trap "$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}" EXIT HUP +trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}' EXIT HUP -set +e -wget -t 1 -T 10 -O - "http://localhost:8080${PROJECT_DIRECTORY}" -STATUS=$? 
-set -e - -# Debug -cat error_log - -if [ $STATUS != 0 ]; then - exit $STATUS -fi +wget -t 1 -T 10 -O - "http://localhost:8080/interpreter1${PROJECT_DIRECTORY}" || (cat error_log && exit 1) +wget -t 1 -T 10 -O - "http://localhost:8080/interpreter2${PROJECT_DIRECTORY}" || (cat error_log && exit 1) -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel http://localhost:8080${PROJECT_DIRECTORY} +${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel \ + http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ + (tail -n 100 error_log && exit 1) -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial http://localhost:8080${PROJECT_DIRECTORY} +${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial \ + http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ + (tail -n 100 error_log && exit 1) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh new file mode 100755 index 0000000000..757ad80790 --- /dev/null +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +if [[ -z "$1" ]]; then + echo "usage: $0 <MONGODB_URI>" + exit 1 +fi +export MONGODB_URI="$1" + +if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in ECS test!"; + exit 1 +fi +# Now we can safely enable xtrace +set -o xtrace + +# Install python with pip. +PYTHON_VER="python3.9" +apt-get update +apt-get install $PYTHON_VER python3-pip build-essential $PYTHON_VER-dev -y + +export PYTHON_BINARY=$PYTHON_VER +export TEST_AUTH_AWS=1 +export AUTH="auth" +export SET_XTRACE_ON=1 +cd src +$PYTHON_BINARY -m pip install -q --user tox +bash .evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh new file mode 100755 index 0000000000..684c80452d --- /dev/null +++ b/.evergreen/run-mongodb-aws-test.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use +# to connect to the server via MONGODB-AWS authentication +# mechanism. +# PYTHON_BINARY The Python version to use. + +echo "Running MONGODB-AWS authentication tests" + +# Handle credentials and environment setup. +. $DRIVERS_TOOLS/.evergreen/auth_aws/aws_setup.sh $1 + +# show test output +set -x + +export TEST_AUTH_AWS=1 +export AUTH="auth" +export SET_XTRACE_ON=1 +bash ./.evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh new file mode 100755 index 0000000000..75fafb448b --- /dev/null +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +set +x # Disable debug trace +set -o errexit # Exit the script with error if any of the commands fail + +echo "Running MONGODB-OIDC authentication tests" + +# Make sure DRIVERS_TOOLS is set.
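+# (DRIVERS_TOOLS is the local checkout of mongodb-labs/drivers-evergreen-tools that the Evergreen config clones; without it this script cannot locate setup_secrets.sh or oidc_get_tokens.sh below.)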
+if [ -z "$DRIVERS_TOOLS" ]; then + echo "Must specify DRIVERS_TOOLS" + exit 1 +fi + +# Get the drivers secrets. Use an existing secrets file first. +if [ ! -f "./secrets-export.sh" ]; then + bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/oidc +fi +source ./secrets-export.sh + +# # If the file did not have our creds, get them from the vault. +if [ -z "$OIDC_ATLAS_URI_SINGLE" ]; then + bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/oidc + source ./secrets-export.sh +fi + +# Make the OIDC tokens. +set -x +pushd ${DRIVERS_TOOLS}/.evergreen/auth_oidc +. ./oidc_get_tokens.sh +popd + +# Set up variables and run the test. +if [ -n "$LOCAL_OIDC_SERVER" ]; then + export MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} + export MONGODB_URI_SINGLE="${MONGODB_URI}/?authMechanism=MONGODB-OIDC" + export MONGODB_URI_MULTI="${MONGODB_URI}:27018/?authMechanism=MONGODB-OIDC&directConnection=true" +else + set +x # turn off xtrace for this portion + export MONGODB_URI="$OIDC_ATLAS_URI_SINGLE" + export MONGODB_URI_SINGLE="$OIDC_ATLAS_URI_SINGLE/?authMechanism=MONGODB-OIDC" + export MONGODB_URI_MULTI="$OIDC_ATLAS_URI_MULTI/?authMechanism=MONGODB-OIDC" + set -x +fi + +export TEST_AUTH_OIDC=1 +export COVERAGE=1 +export AUTH="auth" +bash ./.evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index bbebf34c98..72be38e03d 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -13,21 +13,7 @@ cd .. export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" -MTCBIN=/opt/mongodbtoolchain/v2/bin -VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python2.7" +export PYTHON_BINARY=/opt/mongodbtoolchain/v3/bin/python3 +export PERF_TEST=1 -$VIRTUALENV pyperftest -. pyperftest/bin/activate -pip install simplejson - -python setup.py build_ext -i -start_time=$(date +%s) -python test/performance/perf_test.py -end_time=$(date +%s) -elapsed_secs=$((end_time-start_time)) - -cat results.json - -echo "{\"failures\": 0, \"results\": [{\"status\": \"pass\", \"exit_code\": 0, \"test_file\": \"BenchMarkTests\", \"start\": $start_time, \"end\": $end_time, \"elapsed\": $elapsed_secs}]}" > report.json - -cat report.json +bash ./.evergreen/tox.sh -m test-eg diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 8b27e0ec84..8a31a96a3c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,95 +1,133 @@ -#!/bin/sh +#!/bin/bash set -o errexit # Exit the script with error if any of the commands fail +set -o xtrace -# Supported/used environment variables: -# SET_XTRACE_ON Set to non-empty to write all commands first to stderr. -# AUTH Set to enable authentication. Defaults to "noauth" -# SSL Set to enable SSL. Defaults to "nossl" -# PYTHON_BINARY The Python version to use. Defaults to whatever is available -# GREEN_FRAMEWORK The green framework to test with, if any. -# C_EXTENSIONS Pass --no_ext to setup.py, or not. -# COVERAGE If non-empty, run the test suite with coverage. -# TEST_ENCRYPTION If non-empty, install pymongocrypt. -# LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# SETDEFAULTENCODING The encoding to set via sys.setdefaultencoding. - -if [ -n "${SET_XTRACE_ON}" ]; then - set -o xtrace -else - set +x -fi +# Note: It is assumed that you have already set up a virtual environment before running this file. +# Supported/used environment variables: +# AUTH Set to enable authentication. Defaults to "noauth" +# SSL Set to enable SSL. 
Defaults to "nossl" +# GREEN_FRAMEWORK The green framework to test with, if any. +# C_EXTENSIONS Pass --no_ext to skip installing the C extensions. +# COVERAGE If non-empty, run the test suite with coverage. +# COMPRESSORS If non-empty, install appropriate compressor. +# LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# TEST_DATA_LAKE If non-empty, run data lake tests. +# TEST_ENCRYPTION If non-empty, run encryption tests. +# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. +# TEST_SERVERLESS If non-empy, test on serverless. +# TEST_LOADBALANCER If non-empy, test load balancing. +# TEST_FLE_AZURE_AUTO If non-empy, test auto FLE on Azure +# TEST_FLE_GCP_AUTO If non-empy, test auto FLE on GCP +# TEST_PYOPENSSL If non-empy, test with PyOpenSSL +# TEST_ENTERPRISE_AUTH If non-empty, test with Enterprise Auth +# TEST_AUTH_AWS If non-empty, test AWS Auth Mechanism +# TEST_AUTH_OIDC If non-empty, test OIDC Auth Mechanism +# TEST_PERF If non-empty, run performance tests +# TEST_OCSP If non-empty, run OCSP tests +# TEST_ENCRYPTION_PYOPENSSL If non-empy, test encryption with PyOpenSSL +# TEST_ATLAS If non-empty, test Atlas connections AUTH=${AUTH:-noauth} SSL=${SSL:-nossl} -PYTHON_BINARY=${PYTHON_BINARY:-} -GREEN_FRAMEWORK=${GREEN_FRAMEWORK:-} -C_EXTENSIONS=${C_EXTENSIONS:-} -COVERAGE=${COVERAGE:-} -COMPRESSORS=${COMPRESSORS:-} -TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} -SETDEFAULTENCODING=${SETDEFAULTENCODING:-} +TEST_ARGS="$1" +PYTHON=$(which python) +export PIP_QUIET=1 # Quiet by default -if [ -n "$COMPRESSORS" ]; then - export COMPRESSORS=$COMPRESSORS -fi +python -c "import sys; sys.exit(sys.prefix == sys.base_prefix)" || (echo "Not inside a virtual env!"; exit 1) -export JAVA_HOME=/opt/java/jdk8 +# Try to source exported AWS Secrets +if [ -f ./secrets-export.sh ]; then + source ./secrets-export.sh +fi if [ "$AUTH" != "noauth" ]; then - export DB_USER="bob" - export DB_PASSWORD="pwd123" + set +x + if [ ! -z "$TEST_DATA_LAKE" ]; then + export DB_USER="mhuser" + export DB_PASSWORD="pencil" + elif [ ! -z "$TEST_SERVERLESS" ]; then + export DB_USER=$SERVERLESS_ATLAS_USER + export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD + elif [ ! 
-z "$TEST_AUTH_OIDC" ]; then + export DB_USER=$OIDC_ALTAS_USER + export DB_PASSWORD=$OIDC_ATLAS_PASSWORD + else + export DB_USER="bob" + export DB_PASSWORD="pwd123" + fi + set -x +fi + +if [ -n "$TEST_ENTERPRISE_AUTH" ]; then + if [ "Windows_NT" = "$OS" ]; then + echo "Setting GSSAPI_PASS" + export GSSAPI_PASS=${SASL_PASS} + export GSSAPI_CANONICALIZE="true" + else + # BUILD-3830 + touch krb5.conf.empty + export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty + + echo "Writing keytab" + echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab + echo "Running kinit" + kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} + fi + echo "Setting GSSAPI variables" + export GSSAPI_HOST=${SASL_HOST} + export GSSAPI_PORT=${SASL_PORT} + export GSSAPI_PRINCIPAL=${PRINCIPAL} +fi + +if [ -n "$TEST_LOADBALANCER" ]; then + export LOAD_BALANCER=1 + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI:-mongodb://127.0.0.1:8000/?loadBalanced=true}" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI:-mongodb://127.0.0.1:8001/?loadBalanced=true}" + export TEST_ARGS="test/test_load_balancer.py" fi if [ "$SSL" != "nossl" ]; then export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" -fi -if [ -z "$PYTHON_BINARY" ]; then - VIRTUALENV=$(command -v virtualenv) || true - if [ -z "$VIRTUALENV" ]; then - PYTHON=$(command -v python || command -v python3) || true - if [ -z "$PYTHON" ]; then - echo "Cannot test without python or python3 installed!" - exit 1 - fi - else - $VIRTUALENV pymongotestvenv - . pymongotestvenv/bin/activate - PYTHON=python - trap "deactivate; rm -rf pymongotestvenv" EXIT HUP + if [ -n "$TEST_LOADBALANCER" ]; then + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}&tls=true" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}&tls=true" fi -elif [ "$COMPRESSORS" = "snappy" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download snappytest - . snappytest/bin/activate - trap "deactivate; rm -rf snappytest" EXIT HUP - # 0.5.2 has issues in pypy3(.5) - pip install python-snappy==0.5.1 +fi + +if [ "$COMPRESSORS" = "snappy" ]; then + python -m pip install '.[snappy]' PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download zstdtest - . zstdtest/bin/activate - trap "deactivate; rm -rf zstdtest" EXIT HUP - pip install zstandard - PYTHON=python -elif [ -n "$SETDEFAULTENCODING" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download encodingtest - . encodingtest/bin/activate - trap "deactivate; rm -rf encodingtest" EXIT HUP - mkdir test-sitecustomize - cat < test-sitecustomize/sitecustomize.py -import sys -sys.setdefaultencoding("$SETDEFAULTENCODING") -EOT - export PYTHONPATH="$(pwd)/test-sitecustomize" - PYTHON=python -else - PYTHON="$PYTHON_BINARY" + python -m pip install zstandard fi -if [ -n "$TEST_ENCRYPTION" ]; then +# PyOpenSSL test setup. +if [ -n "$TEST_PYOPENSSL" ]; then + python -m pip install '.[ocsp]' +fi + +if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + + # Work around for root certifi not being installed. + # TODO: Remove after PYTHON-3952 is deployed. 
+ if [ "$(uname -s)" = "Darwin" ]; then + python -m pip install certifi + CERT_PATH=$(python -c "import certifi; print(certifi.where())") + export SSL_CERT_FILE=${CERT_PATH} + export REQUESTS_CA_BUNDLE=${CERT_PATH} + export AWS_CA_BUNDLE=${CERT_PATH} + fi + + python -m pip install '.[encryption]' + + if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. + powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true + fi + if [ -z "$LIBMONGOCRYPT_URL" ]; then echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" exit 1 @@ -102,86 +140,157 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto if [ -f "${BASE}/lib/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so elif [ -f "${BASE}/lib/libmongocrypt.dylib" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib elif [ -f "${BASE}/bin/mongocrypt.dll" ]; then PYMONGOCRYPT_LIB=${BASE}/bin/mongocrypt.dll # libmongocrypt's windows dll is not marked executable. chmod +x $PYMONGOCRYPT_LIB - export PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) + PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) elif [ -f "${BASE}/lib64/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so else echo "Cannot find libmongocrypt shared object file" exit 1 fi + export PYMONGOCRYPT_LIB - git clone --branch master git@github.com:mongodb/libmongocrypt.git libmongocrypt_git - $PYTHON -m pip install --upgrade ./libmongocrypt_git/bindings/python - # TODO: use a virtualenv - trap "$PYTHON -m pip uninstall -y pymongocrypt" EXIT HUP - $PYTHON -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" - $PYTHON -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" - # PATH is set by PREPARE_SHELL. + # TODO: Test with 'pip install pymongocrypt' + git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git + python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt + python -m pip install ./libmongocrypt_git/bindings/python + python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" + python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" + # PATH is updated by PREPARE_SHELL for access to mongocryptd. fi -PYTHON_IMPL=$($PYTHON -c "import platform, sys; sys.stdout.write(platform.python_implementation())") -if [ $PYTHON_IMPL = "Jython" ]; then - EXTRA_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" -else - EXTRA_ARGS="" +if [ -n "$TEST_ENCRYPTION" ]; then + if [ -n "$TEST_ENCRYPTION_PYOPENSSL" ]; then + python -m pip install '.[ocsp]' + fi + + # Get access to the AWS temporary credentials: + # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN + . 
$DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh + + if [ -n "$TEST_CRYPT_SHARED" ]; then + CRYPT_SHARED_DIR=`dirname $CRYPT_SHARED_LIB_PATH` + echo "using crypt_shared_dir $CRYPT_SHARED_DIR" + export DYLD_FALLBACK_LIBRARY_PATH=$CRYPT_SHARED_DIR:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=$CRYPT_SHARED_DIR:$LD_LIBRARY_PATH + export PATH=$CRYPT_SHARED_DIR:$PATH + fi + # Only run the encryption tests. + if [ -z "$TEST_ARGS" ]; then + TEST_ARGS="test/test_encryption.py" + fi fi -# Don't download unittest-xml-reporting from pypi, which often fails. -HAVE_XMLRUNNER=$($PYTHON -c "import pkgutil, sys; sys.stdout.write('1' if pkgutil.find_loader('xmlrunner') else '0')") -if [ $HAVE_XMLRUNNER = "1" ]; then - # The xunit output dir must be a Python style absolute path. - XUNIT_DIR="$(pwd)/xunit-results" - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - XUNIT_DIR=$(cygpath -m $XUNIT_DIR) +if [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + if [[ -z "$SUCCESS" ]]; then + echo "Must define SUCCESS" + exit 1 fi - OUTPUT="--xunit-output=${XUNIT_DIR}" -else - OUTPUT="" + + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in FLE test!"; + exit 1 + fi + + if [ -z "$TEST_ARGS" ]; then + TEST_ARGS="test/test_on_demand_csfle.py" + fi +fi + +if [ -n "$TEST_INDEX_MANAGEMENT" ]; then + TEST_ARGS="test/test_index_management.py" +fi + +if [ -n "$TEST_DATA_LAKE" ] && [ -z "$TEST_ARGS" ]; then + TEST_ARGS="test/test_data_lake.py" +fi + +if [ -n "$TEST_ATLAS" ]; then + TEST_ARGS="test/atlas/test_connection.py" +fi + +if [ -n "$TEST_OCSP" ]; then + python -m pip install ".[ocsp]" + TEST_ARGS="test/ocsp/test_ocsp.py" +fi + +if [ -n "$TEST_AUTH_AWS" ]; then + python -m pip install ".[aws]" + TEST_ARGS="test/auth_aws/test_auth_aws.py" +fi + +if [ -n "$TEST_AUTH_OIDC" ]; then + python -m pip install ".[aws]" + + # Work around for root certifi not being installed. + # TODO: Remove after PYTHON-3952 is deployed. + if [ "$(uname -s)" = "Darwin" ]; then + python -m pip install certifi + CERT_PATH=$(python -c "import certifi; print(certifi.where())") + export SSL_CERT_FILE=${CERT_PATH} + export REQUESTS_CA_BUNDLE=${CERT_PATH} + export AWS_CA_BUNDLE=${CERT_PATH} + fi + + TEST_ARGS="test/auth_oidc/test_auth_oidc.py" +fi + +if [ -n "$PERF_TEST" ]; then + python -m pip install simplejson + start_time=$(date +%s) + TEST_ARGS="test/performance/perf_test.py" fi echo "Running $AUTH tests over $SSL with python $PYTHON" -$PYTHON -c 'import sys; print(sys.version)' +python -c 'import sys; print(sys.version)' + # Run the tests, and store the results in Evergreen compatible XUnit XML # files in the xunit-results/ directory. # Run the tests with coverage if requested and coverage is installed. -# Only cover CPython. Jython and PyPy report suspiciously low coverage. -COVERAGE_OR_PYTHON="$PYTHON" -COVERAGE_ARGS="" -if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then - COVERAGE_BIN="$(dirname "$PYTHON")/coverage" - if $COVERAGE_BIN --version; then - echo "INFO: coverage is installed, running tests with coverage..." - COVERAGE_OR_PYTHON="$COVERAGE_BIN" - COVERAGE_ARGS="run --branch" - else - echo "INFO: coverage is not installed, running tests without coverage..." - fi +# Only cover CPython. PyPy reports suspiciously low coverage. 
+PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") +if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then + # coverage 7.3 dropped support for Python 3.7, keep in sync with combine-coverage.sh. + # coverage >=5 is needed for relative_files=true. + python -m pip install pytest-cov "coverage>=5,<7.3" + TEST_ARGS="$TEST_ARGS --cov" +fi + +if [ -n "$GREEN_FRAMEWORK" ]; then + python -m pip install $GREEN_FRAMEWORK fi -$PYTHON setup.py clean +# Show the installed packages +PIP_QUIET=0 python -m pip list + if [ -z "$GREEN_FRAMEWORK" ]; then - if [ -z "$C_EXTENSIONS" -a $PYTHON_IMPL = "CPython" ]; then - # Fail if the C extensions fail to build. - - # This always sets 0 for exit status, even if the build fails, due - # to our hack to install PyMongo without C extensions when build - # deps aren't available. - $PYTHON setup.py build_ext -i - # This will set a non-zero exit status if either import fails, - # causing this script to exit. - $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" - fi - $COVERAGE_OR_PYTHON $EXTRA_ARGS $COVERAGE_ARGS setup.py $C_EXTENSIONS test $OUTPUT + .evergreen/check-c-extensions.sh + python -m pytest -v --durations=5 --maxfail=10 $TEST_ARGS else - # --no_ext has to come before "test" so there is no way to toggle extensions here. - $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT + python green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS +fi + +# Handle perf test post actions. +if [ -n "$PERF_TEST" ]; then + end_time=$(date +%s) + elapsed_secs=$((end_time-start_time)) + + cat results.json + + echo "{\"failures\": 0, \"results\": [{\"status\": \"pass\", \"exit_code\": 0, \"test_file\": \"BenchMarkTests\", \"start\": $start_time, \"end\": $end_time, \"elapsed\": $elapsed_secs}]}" > report.json + + cat report.json +fi + +# Handle coverage post actions. +if [ -n "$COVERAGE" ]; then + rm -rf .pytest_cache fi diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt new file mode 100644 index 0000000000..13ed7ebb15 --- /dev/null +++ b/.evergreen/test-encryption-requirements.txt @@ -0,0 +1,3 @@ +cffi>=1.12.0,<2 +# boto3 is required by drivers-evergreen-tools/.evergreen/csfle/set-temp-creds.sh +boto3<2 diff --git a/.evergreen/tox.sh b/.evergreen/tox.sh new file mode 100644 index 0000000000..808787d63c --- /dev/null +++ b/.evergreen/tox.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail +set -x + +. .evergreen/utils.sh + +if [ -z "$PYTHON_BINARY" ]; then + PYTHON_BINARY=$(find_python3) +fi + +if $PYTHON_BINARY -m tox --version; then + run_tox() { + $PYTHON_BINARY -m tox "$@" + } +else # No toolchain present, set up virtualenv before installing tox + createvirtualenv "$PYTHON_BINARY" toxenv + trap "deactivate; rm -rf toxenv" EXIT HUP + python -m pip install -q tox + run_tox() { + python -m tox "$@" + } +fi + +run_tox "${@:1}" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh new file mode 100755 index 0000000000..35005c0d6a --- /dev/null +++ b/.evergreen/utils.sh @@ -0,0 +1,112 @@ +#!/bin/bash -ex + +set -o xtrace + +find_python3() { + PYTHON="" + # Add a fallback system python3 if it is available and Python 3.7+. + if is_python_37 "$(command -v python3)"; then + PYTHON="$(command -v python3)" + fi + # Find a suitable toolchain version, if available. 
+ if [ "$(uname -s)" = "Darwin" ]; then + # macos 11.00 + if [ -d "/Library/Frameworks/Python.Framework/Versions/3.10" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" + # macos 10.14 + elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.7" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.7/bin/python3" + fi + elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + PYTHON="C:/python/Python37/python.exe" + else + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.7+. + if [ -f "/opt/python/3.7/bin/python3" ]; then + PYTHON="/opt/python/3.7/bin/python3" + elif is_python_37 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v4/bin/python3" + elif is_python_37 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v3/bin/python3" + fi + fi + if [ -z "$PYTHON" ]; then + echo "Cannot test without python3.7+ installed!" + exit 1 + fi + echo "$PYTHON" +} + +# Usage: +# createvirtualenv /path/to/python /output/path/for/venv +# * param1: Python binary to use for the virtualenv +# * param2: Path to the virtualenv to create +createvirtualenv () { + PYTHON=$1 + VENVPATH=$2 + # Prefer venv + VENV="$PYTHON -m venv" + if [ "$(uname -s)" = "Darwin" ]; then + VIRTUALENV="$PYTHON -m virtualenv" + else + VIRTUALENV=$(command -v virtualenv 2>/dev/null || echo "$PYTHON -m virtualenv") + VIRTUALENV="$VIRTUALENV -p $PYTHON" + fi + if ! $VENV $VENVPATH 2>/dev/null; then + # Workaround for bug in older versions of virtualenv. + $VIRTUALENV $VENVPATH 2>/dev/null || $VIRTUALENV $VENVPATH + fi + if [ "Windows_NT" = "$OS" ]; then + # Workaround https://bugs.python.org/issue32451: + # mongovenv/Scripts/activate: line 3: $'\r': command not found + dos2unix $VENVPATH/Scripts/activate || true + . $VENVPATH/Scripts/activate + else + . $VENVPATH/bin/activate + fi + + export PIP_QUIET=1 + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools tox +} + +# Usage: +# testinstall /path/to/python /path/to/.whl ["no-virtualenv"] +# * param1: Python binary to test +# * param2: Path to the wheel to install +# * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. +testinstall () { + PYTHON=$1 + RELEASE=$2 + NO_VIRTUALENV=$3 + + if [ -z "$NO_VIRTUALENV" ]; then + createvirtualenv $PYTHON venvtestinstall + PYTHON=python + fi + + $PYTHON -m pip install --upgrade $RELEASE + cd tools + $PYTHON fail_if_no_c.py + $PYTHON -m pip uninstall -y pymongo + cd .. 
+ + if [ -z "$NO_VIRTUALENV" ]; then + deactivate + rm -rf venvtestinstall + fi +} + +# Function that returns success if the provided Python binary is version 3.7 or later +# Usage: +# is_python_37 /path/to/python +# * param1: Python binary +is_python_37() { + if [ -z "$1" ]; then + return 1 + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 7))"; then + # runs when sys.version_info[:2] >= (3, 7) + return 0 + else + return 1 + fi +} diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..67ad992c75 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# Initial pre-commit reformat +5578999a90e439fbca06fc0ffc98f4d04e96f7b4 +# pyupgrade and ruff +0092b0af79378abf35b6db73a082ecb91af1d973 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..a9d726b96b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +# Global owner for repo +* @mongodb/dbx-python diff --git a/.github/workflows/pull_request_template.md b/.github/workflows/pull_request_template.md new file mode 100644 index 0000000000..852066d4b2 --- /dev/null +++ b/.github/workflows/pull_request_template.md @@ -0,0 +1,23 @@ +# [JIRA Ticket ID](Link to Ticket) + + +# Summary + + +# Changes in this PR + + +# Test Plan + + +# Screenshots (Optional) + + +# Callouts or Follow-up items (Optional) + diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml new file mode 100644 index 0000000000..624cff1bf2 --- /dev/null +++ b/.github/workflows/test-python.yml @@ -0,0 +1,163 @@ +name: Python Tests + +on: + push: + pull_request: + +concurrency: + group: tests-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v3 + with: + python-version: 3.8 + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install Python dependencies + run: | + python -m pip install -U pip tox + - name: Run linters + run: | + tox -m lint-manual + tox -m manifest + + build: + # supercharge/mongodb-github-action requires containers so we don't test other platforms + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04] + python-version: ["3.7", "3.11", "pypy-3.8"] + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install dependencies + run: | + pip install -q tox + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.7.0 + with: + mongodb-version: 4.4 + - name: Run tests + run: | + tox -m test + + doctest: + runs-on: ubuntu-latest + name: DocTest + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.8" + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install dependencies + run: | + pip install -q tox + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.7.0 + with: + mongodb-version: 4.4 + - name: Run tests + run: | + tox -m doc-test + + typing: + name: Typing Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: "3.11" + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install dependencies + run: | + pip install -q tox + - name: Run typecheck + run: | + tox 
-m typecheck + + docs: + name: Docs Checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Install dependencies + run: | + pip install -q tox + - name: Check links + run: | + tox -m linkcheck + - name: Build docs + run: | + tox -m doc + + make_sdist: + runs-on: ubuntu-latest + name: "Make an sdist" + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + - name: Build SDist + shell: bash + run: | + pip install build + python -m build --sdist + - uses: actions/upload-artifact@v3 + with: + name: "sdist" + path: dist/*.tar.gz + + test_sdist: + runs-on: ubuntu-latest + needs: [make_sdist] + name: Install from SDist and Test + timeout-minutes: 20 + steps: + - name: Download sdist + uses: actions/download-artifact@v3 + - name: Unpack SDist + shell: bash + run: | + cd sdist + mkdir test + tar --strip-components=1 -zxf *.tar.gz -C ./test + - uses: actions/setup-python@v2 + with: + cache: 'pip' + cache-dependency-path: 'sdist/test/pyproject.toml' + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.7.0 + - name: Run Test + shell: bash + run: | + cd sdist/test + pip install -e ".[test]" + pytest -v diff --git a/.gitignore b/.gitignore index 385160b014..77483d26b2 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,14 @@ pymongo.egg-info/ *.egg .tox mongocryptd.pid +.idea/ +.nova/ +venv/ +secrets-export.sh + +# Lambda temp files +test/lambda/.aws-sam +test/lambda/env.json +test/lambda/mongodb/pymongo/* +test/lambda/mongodb/gridfs/* +test/lambda/mongodb/bson/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..00a03defcd --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,101 @@ + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-yaml + exclude: template.yaml + - id: debug-statements + - id: end-of-file-fixer + exclude: WHEEL + exclude_types: [json] + - id: forbid-new-submodules + - id: trailing-whitespace + exclude: .patch + exclude_types: [json] + +- repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + files: \.py$ + args: [--line-length=100] + +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.1.0 + hooks: + - id: ruff + args: ["--fix", "--show-fixes"] + +- repo: https://github.com/adamchainz/blacken-docs + rev: "1.13.0" + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.3.0 + +- repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + +- repo: https://github.com/rstcheck/rstcheck + rev: v6.2.0 + hooks: + - id: rstcheck + additional_dependencies: [sphinx] + args: ["--ignore-directives=doctest,testsetup,todo,automodule","--ignore-substitutions=release", "--report-level=error"] + +# We use the Python version instead of the original version which seems to require Docker +# https://github.com/koalaman/shellcheck-precommit +- repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.8.0.4 + hooks: + - id: shellcheck + name: shellcheck + args: ["--severity=warning"] + stages: [manual] + +- repo: https://github.com/PyCQA/doc8 + rev: 0.11.1 + hooks: + - id: doc8 + args: ["--ignore=D001"] # ignore line length + stages: [manual] + +- repo: https://github.com/sirosen/check-jsonschema + rev: 0.14.1 + hooks: + - id: check-jsonschema + name: "Check GitHub Workflows" + files: ^\.github/workflows/ + types: [yaml] + args: ["--schemafile", "https://json.schemastore.org/github-workflow"] + stages: [manual] + +- repo: https://github.com/ariebovenberg/slotscheck + rev: v0.14.0 + hooks: + - id: slotscheck + files: \.py$ + exclude: "^(test|tools)/" + stages: [manual] + +- repo: https://github.com/codespell-project/codespell + rev: "v2.2.4" + hooks: + - id: codespell + # Examples of errors or updates to justify the exceptions: + # - test/test_on_demand_csfle.py:44: FLE ==> FILE + # - test/test_bson.py:1043: fo ==> of, for, to, do, go + # - test/bson_corpus/decimal128-4.json:98: Infinit ==> Infinite + # - test/test_bson.py:267: isnt ==> isn't + # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine + # - test/test_client.py:188: te ==> the, be, we, to + args: ["-L", "fle,fo,infinit,isnt,nin,te"] diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..39c86fff03 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,24 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the doc/ directory with Sphinx +sphinx: + configuration: doc/conf.py + fail_on_warning: true + +# Set the version of Python and requirements required to build the docs. +python: + install: + # Install pymongo itself. + - method: pip + path: . + - requirements: doc/docs-requirements.txt + +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5dd72f6da5..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: python - -python: - - 2.7 - - 3.4 - - 3.5 - - 3.6 - - 3.7 - - 3.8 - - pypy - - pypy3.5 - -services: - - mongodb - -script: PYMONGO_MUST_CONNECT=1 python setup.py test - diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 01857b6763..07d6f1d77c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -2,7 +2,7 @@ Contributing to PyMongo ======================= PyMongo has a large `community -`_ and +`_ and contributions are always encouraged. Contributions can be as simple as minor tweaks to the documentation. Please read these guidelines before sending a pull request. 
@@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. Language +PyMongo supports CPython 3.7+ and PyPy3.8+. Language features not supported by all interpreters can not be used. Style Guide @@ -34,17 +34,138 @@ General Guidelines - Avoid backward breaking changes if at all possible. - Write inline documentation for new classes and methods. - Write tests and make sure they pass (make sure you have a mongod - running on the default port, then execute ``python setup.py test`` + running on the default port, then execute ``tox -m test`` from the cmd line to run the test suite). - Add yourself to doc/contributors.rst :) +Authoring a Pull Request +------------------------ + +**Our Pull Request Policy is based on this** `Code Review Developer Guide `_ + +The expectation for any code author is to provide all the context needed in the space of a +pull request for any engineer to feel equipped to review the code. Depending on the type of +change, do your best to highlight important new functions or objects you’ve introduced in the +code; think complex functions or new abstractions. Whilst it may seem like more work for you to +adjust your pull request, the reality is your likelihood for getting review sooner shoots +up. + +**Self Review Guidelines to follow** + +- If the PR is too large, split it if possible. + - Use 250 LoC (excluding test data and config changes) as a rule-of-thumb. + - Moving and changing code should be in separate PRs or commits. + - Moving: Taking large code blobs and transplanting them to another file. There's generally no (or very little) actual code changed other than a cut and paste. It can even be extended to large deletions. + - Changing: Adding code changes (be that refactors or functionality additions/subtractions). + - These two, when mixed, can muddy understanding and sometimes make it harder for reviewers to keep track of things. +- Prefer explaining with code comments instead of PR comments. + +**Provide background** + +- The PR description and linked tickets should answer the "what" and "why" of the change. The code change explains the "how". + +**Follow the Template** + +- Please do not deviate from the template we make; it is there for a lot of reasons. If it is a one line fix, we still need to have context on what and why it is needed. +- If making a versioning change, please let that be known. See examples below: + - ``versionadded:: 3.11`` + - ``versionchanged:: 3.5`` + + +**Pull Request Template Breakdown** + +- **Github PR Title** + - The PR Title format should always be ``[JIRA-ID] : Jira Title or Blurb Summary``. + +- **JIRA LINK** + - Convenient link to the associated JIRA ticket. + +- **Summary** + - Small blurb on why this is needed. The JIRA task should have the more in-depth description, but this should still, at a high level, give anyone looking an understanding of why the PR has been checked in. + +- **Changes in this PR** + - The explicit code changes that this PR is introducing. This should be more specific than just the task name. (Unless the task name is very clear). + +- **Test Plan** + - Everything needs a test description. Describe what you did to validate your changes actually worked; if you did nothing, then document you did not test it. Aim to make these steps reproducible by other engineers, specifically with your primary reviewer in mind. 
+
+- **Screenshots**
+    - Any images that provide more context to the PR. Usually, these just coincide with the test plan.
+
+- **Callouts or follow-up items**
+    - This is a good place for identifying “to-dos” that you’ve placed in the code (Must have an accompanying JIRA Ticket).
+    - Potential bugs that you are unsure how to test in the code.
+    - Opinions you want to receive about your code.
+
+
+Running Linters
+---------------
+
+PyMongo uses `pre-commit `_
+for managing linting of the codebase.
+``pre-commit`` performs various checks on all files in PyMongo and uses tools
+that help follow a consistent code style within the codebase.
+
+To set up ``pre-commit`` locally, run::
+
+    pip install pre-commit
+    pre-commit install
+
+To run ``pre-commit`` manually, run::
+
+    pre-commit run --all-files
+
+To run a manual hook like ``flake8``, run::
+
+    pre-commit run --all-files --hook-stage manual flake8
+
 Documentation
 -------------
-To contribute to the `API documentation `_
+To contribute to the `API documentation `_
 just make your changes to the inline documentation of the appropriate
 `source code `_ or `rst file
 `_ in
 a branch and submit a `pull request `_.
 You might also use the GitHub
 `Edit `_
 button.
+
+Running Tests Locally
+---------------------
+- Ensure you have started the appropriate Mongo Server(s).
+- Run ``pip install tox`` to use ``tox`` for testing or run
+  ``pip install -e ".[test]"`` to run ``pytest`` directly.
+- Run ``tox -m test`` or ``pytest`` to run all of the tests.
+- Append ``test/<mod_name>.py::<class_name>::<test_name>`` to
+  run specific tests. You can omit the ``<test_name>`` to test a full class
+  and the ``<class_name>`` to test a full module. For example:
+  ``tox -m test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress``.
+- Use the ``-k`` argument to select tests by pattern.
+
+Running Load Balancer Tests Locally
+-----------------------------------
+- Install ``haproxy`` (available as ``brew install haproxy`` on macOS).
+- Clone ``drivers-evergreen-tools``:
+  ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``.
+- Start the servers using
+  ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh``.
+- Start the load balancer using:
+  ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start``.
+- Run the tests from the ``pymongo`` checkout directory using:
+  ``TEST_LOADBALANCER=1 tox -m test-eg``.
+
+Re-sync Spec Tests
+------------------
+
+If you would like to re-sync the copy of the specification tests in the
+PyMongo repository with that which is inside the `specifications repo
+`_, please
+use the script provided in ``.evergreen/resync-specs.sh``::
+
+    git clone git@github.com:mongodb/specifications.git
+    export MDB_SPECS=~/specifications
+    cd ~/mongo-python-driver/.evergreen
+    ./resync-specs.sh -b "<regex>" spec1 spec2 ...
+    ./resync-specs.sh -b "connection-string*" crud bson-corpus  # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*"
+    cd ..
+
+The ``-b`` flag adds a regex pattern to block files you do not wish to
+update in PyMongo.
+This is primarily helpful if you are implementing a new feature in PyMongo
+that has spec tests already implemented, or if you are attempting to
+validate new spec tests in PyMongo.
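A note on the test-selection syntax documented in ``CONTRIBUTING.rst`` above: the ``test/<mod_name>.py::<class_name>::<test_name>`` form is standard pytest node-ID syntax, so the same selectors also work when pytest is driven from Python rather than through tox. A minimal sketch, reusing the illustrative node ID from the example above (the concrete module, class, and pattern names are placeholders, not part of this changeset)::

    # Sketch: run a subset of the test suite programmatically.
    # pytest.main() accepts the same arguments as the pytest CLI and
    # returns the exit code instead of exiting the interpreter.
    import sys

    import pytest

    exit_code = pytest.main(
        [
            # Trim the node ID to a class or module to widen the selection.
            "test/test_change_stream.py::TestUnifiedChangeStreamsErrors",
            "-k", "ElectionInProgress",  # substring pattern, as with -k on the CLI
            "-v",
        ]
    )
    sys.exit(exit_code)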
diff --git a/MANIFEST.in b/MANIFEST.in index 5b8d2a5651..444da54d57 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,15 +1,32 @@ include README.rst include LICENSE include THIRD-PARTY-NOTICES -include ez_setup.py +include *.ini +exclude .coveragerc +exclude .flake8 +exclude .git-blame-ignore-revs +exclude .pre-commit-config.yaml +exclude .readthedocs.yaml +exclude CONTRIBUTING.rst +exclude RELEASE.rst recursive-include doc *.rst recursive-include doc *.py recursive-include doc *.conf recursive-include doc *.css recursive-include doc *.js recursive-include doc *.png +include doc/Makefile +include doc/_templates/layout.html +include doc/docs-requirements.txt +include doc/make.bat +include doc/static/periodic-executor-refs.dot recursive-include tools *.py include tools/README.rst +include green_framework_test.py recursive-include test *.pem recursive-include test *.py +recursive-include test *.json recursive-include bson *.h +prune test/mod_wsgi_test +prune test/lambda +prune .evergreen diff --git a/README.rst b/README.rst index a1f3664e44..3172ecb8aa 100644 --- a/README.rst +++ b/README.rst @@ -2,8 +2,8 @@ PyMongo ======= :Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. -:Author: Mike Dirolf -:Maintainer: Bernie Hackett +:Documentation: Available at `pymongo.readthedocs.io `_ +:Author: The MongoDB Python Team About ===== @@ -13,19 +13,19 @@ database from Python. The ``bson`` package is an implementation of the `BSON format `_ for Python. The ``pymongo`` package is a native Python driver for MongoDB. The ``gridfs`` package is a `gridfs -`_ +`_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0 and 4.2. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, and 7.0. Support / Feedback ================== For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please +our `support channels `_. Please do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `mongodb-user -`_ list on Google Groups. +questions - you're more likely to get an answer on `StackOverflow `_ +(using a "mongodb" tag). Bugs / Feature Requests ======================= @@ -63,7 +63,7 @@ Security Vulnerabilities If you’ve identified a security vulnerability in a driver or any other MongoDB project, please report it according to the `instructions here -`_. +`_. Installation ============ @@ -79,7 +79,7 @@ Or ``easy_install`` from You can also download the project source and do:: - $ python setup.py install + $ pip install . Do **not** install the "bson" package from pypi. PyMongo comes with its own bson package; doing "easy_install bson" installs a third-party package that @@ -88,7 +88,12 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. +PyMongo supports CPython 3.7+ and PyPy3.7+. + +Required dependencies: + +Support for mongodb+srv:// URIs requires `dnspython +`_ Optional dependencies: @@ -97,64 +102,60 @@ GSSAPI authentication requires `pykerberos `_ on Windows. 
The correct dependency can be installed automatically along with PyMongo:: - $ python -m pip install pymongo[gssapi] + $ python -m pip install "pymongo[gssapi]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: +MONGODB-AWS authentication requires `pymongo-auth-aws +`_:: - $ python -m pip install pymongo[srv] + $ python -m pip install "pymongo[aws]" -TLS / SSL support may require `ipaddress -`_ and `certifi -`_ or `wincertstore -`_ depending on the Python -version in use. The necessary dependencies can be installed along with -PyMongo:: +OCSP (Online Certificate Status Protocol) requires `PyOpenSSL +`_, `requests +`_, `service_identity +`_ and may +require `certifi +`_:: - $ python -m pip install pymongo[tls] + $ python -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python -m pip install pymongo[snappy] + $ python -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python -m pip install pymongo[zstd] - -You can install all dependencies automatically with the following -command:: + $ python -m pip install "pymongo[zstd]" - $ python -m pip install pymongo[snappy,gssapi,srv,tls,zstd] +Client-Side Field Level Encryption requires `pymongocrypt +`_ and +`pymongo-auth-aws `_:: -Other optional packages: + $ python -m pip install "pymongo[encryption]" -- `backports.pbkdf2 `_, - improves authentication performance with SCRAM-SHA-1 and SCRAM-SHA-256. - It especially improves performance on Python versions older than 2.7.8. -- `monotonic `_ adds support for - a monotonic clock, which improves reliability in environments - where clock adjustments are frequent. Not needed in Python 3. +You can install all dependencies automatically with the following +command:: + $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Additional dependencies are: -- (to generate documentation) sphinx_ +- (to generate documentation or run tests) tox_ Examples ======== Here's a basic example (for more see the *examples* section of the docs): -.. code-block:: python +.. code-block:: pycon >>> import pymongo >>> client = pymongo.MongoClient("localhost", 27017) >>> db = client.test >>> db.name - u'test' + 'test' >>> db.my_collection - Collection(Database(MongoClient('localhost', 27017), u'test'), u'my_collection') + Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') >>> db.my_collection.insert_one({"x": 10}).inserted_id ObjectId('4aba15ebe23f6b53b0000000') >>> db.my_collection.insert_one({"x": 8}).inserted_id @@ -162,7 +163,7 @@ Here's a basic example (for more see the *examples* section of the docs): >>> db.my_collection.insert_one({"x": 11}).inserted_id ObjectId('4aba160ee23f6b543e000002') >>> db.my_collection.find_one() - {u'x': 10, u'_id': ObjectId('4aba15ebe23f6b53b0000000')} + {'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} >>> for item in db.my_collection.find(): ... print(item["x"]) ... @@ -170,7 +171,7 @@ Here's a basic example (for more see the *examples* section of the docs): 8 11 >>> db.my_collection.create_index("x") - u'x_1' + 'x_1' >>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): ... print(item["x"]) ... @@ -183,15 +184,21 @@ Here's a basic example (for more see the *examples* section of the docs): Documentation ============= -You will need sphinx_ installed to generate the -documentation. Documentation can be generated by running **python -setup.py doc**. 
Generated documentation can be found in the
+Documentation is available at `pymongo.readthedocs.io `_.
+
+Documentation can be generated by running **tox -m doc**. Generated documentation can be found in the
 *doc/build/html/* directory.

+Learning Resources
+==================
+
+MongoDB Learn - `Python courses `_.
+`Python Articles on Developer Center `_.
+
 Testing
 =======

-The easiest way to run the tests is to run **python setup.py test** in
+The easiest way to run the tests is to run **tox -m test** in
 the root of the distribution.

 To verify that PyMongo works with Gevent's monkey-patching::
@@ -202,4 +209,4 @@ Or with Eventlet's::

     $ python green_framework_test.py eventlet

-.. _sphinx: http://sphinx.pocoo.org/
+.. _tox: https://tox.wiki/en/latest/index.html
diff --git a/RELEASE.rst b/RELEASE.rst
index c7a030d526..55e39baf5a 100644
--- a/RELEASE.rst
+++ b/RELEASE.rst
@@ -31,39 +31,60 @@ that changes the major version number.
 Doing a Release
 ---------------

-1. Test releases on Python 2.7 and 3.4+ on Windows, Linux and OSX,
-   with and without the C extensions. It's generally enough to just run the
-   tests on 2.7, 3.4 and the latest 3.x version with and without the
-   extensions on a single platform, and then just test any version on the
-   other platforms as a sanity check. `python setup.py test` will build the
-   extensions and test. `python tools/clean.py` will remove the extensions,
-   and then `python setup.py --no_ext test` will run the tests without
-   them. You can also run the doctests: `python setup.py doc -t`.
-
-2. Add release notes to doc/changelog.rst. Generally just summarize/clarify
+1. PyMongo is tested on Evergreen. Ensure the latest commits are passing CI
+   as expected: https://evergreen.mongodb.com/waterfall/mongo-python-driver.
+
+2. Check Jira to ensure all the tickets in this version have been completed.
+
+3. Add release notes to doc/changelog.rst. Generally just summarize/clarify
    the git log, but you might add some more long form notes for big changes.

-3. Search and replace the "devN" version number w/ the new version number (see
-   note above).
+4. Make sure the version number is updated in ``pymongo/_version.py``
+
+5. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``.
+
+6. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m 'BUMP 3.11.0' ``.

-4. Make sure version number is updated in setup.py and pymongo/__init__.py
+7. Push commit / tag, eg ``git push && git push --tags``.

-5. Commit with a BUMP version_number message.
+8. Pushing a tag will trigger a release process in Evergreen which builds
+   wheels for manylinux, macOS, and Windows. Wait for the "release-combine"
+   task to complete and then download the "Release files all" archive. See:
+   https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release

-6. Tag w/ version_number
+   The contents should look like this::

-7. Push commit / tag.
+      $ ls path/to/archive
+      pymongo-<version>-cp310-cp310-macosx_10_9_universal2.whl
+      ...
+      pymongo-<version>-cp38-cp38-manylinux2014_x86_64.whl
+      ...
+      pymongo-<version>-cp38-cp38-win_amd64.whl
+      ...
+      pymongo-<version>.tar.gz

-8. Push source to PyPI: `python setup.py sdist upload`
+9. Upload all the release packages to PyPI with twine::

-9. Push binaries to PyPI; for each version of python and platform do:`python
-   setup.py bdist_egg upload`. Probably best to do `python setup.py bdist_egg`
-   first, to make sure the egg builds properly. We also publish wheels.
-   `python setup.py bdist_wheel upload`.
+   $ python3 -m twine upload path/to/archive/*

-10. Make sure to push a build of the new docs (see the apidocs repo).
+10. Make sure the new version appears on https://pymongo.readthedocs.io/. If the
+    new version does not show up automatically, trigger a rebuild of "latest":
+    https://readthedocs.org/projects/pymongo/builds/

-11. Bump the version number to <next version>.dev0 in setup.py/__init__.py,
+11. Bump the version number to <next version>.dev0 in ``pymongo/_version.py``,
    commit, push.

-12. Announce!
+12. Publish the release version in Jira.
+
+13. Announce the release on:
+    https://www.mongodb.com/community/forums/c/announcements/driver-releases/110
+
+14. File a ticket for DOCSP highlighting changes in server version and Python
+    version compatibility or the lack thereof, for example:
+    https://jira.mongodb.org/browse/DOCSP-13536
+
+15. Create a GitHub Release for the tag using
+    https://github.com/mongodb/mongo-python-driver/releases/new.
+    The title should be "PyMongo X.Y.Z", and the description should contain
+    a link to the release notes on the community forum, e.g.
+    "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457."
diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES
index 0bbd9b1644..0b9fc738ed 100644
--- a/THIRD-PARTY-NOTICES
+++ b/THIRD-PARTY-NOTICES
@@ -4,7 +4,7 @@ be distributed under licenses different than the PyMongo software.
 In the event that we accidentally failed to list a required notice,
 please bring it to our attention through any of the ways detailed here:

-    mongodb-dev@googlegroups.com
+    https://jira.mongodb.org/projects/PYTHON

 The attached notices are provided for information only.

@@ -71,81 +71,3 @@ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-3) License Notice for encoding_helpers.c
-----------------------------------------
-
-Portions Copyright 2001 Unicode, Inc.
-
-Disclaimer
-
-This source code is provided as is by Unicode, Inc. No claims are
-made as to fitness for any particular purpose. No warranties of any
-kind are expressed or implied. The recipient agrees to determine
-applicability of information provided. If this file has been
-purchased on magnetic or optical media from Unicode, Inc., the
-sole remedy for any claim will be exchange of defective media
-within 90 days of receipt.
-
-Limitations on Rights to Redistribute This Code
-
-Unicode, Inc. hereby grants the right to freely use the information
-supplied in this file in the creation of products supporting the
-Unicode Standard, and to make copies of this file in any form
-for internal or external distribution as long as this notice
-remains attached.
-
-4) License Notice for ssl_match_hostname.py
--------------------------------------------
-
-Python License (Python-2.0)
-
-Python License, Version 2 (Python-2.0)
-
-PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
---------------------------------------------
-
-1. This LICENSE AGREEMENT is between the Python Software Foundation
-("PSF"), and the Individual or Organization ("Licensee") accessing and
-otherwise using this software ("Python") in source or binary form and
-its associated documentation.
-
-2.
Subject to the terms and conditions of this License Agreement, PSF -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python -alone or in any derivative version, provided, however, that PSF's -License Agreement and PSF's notice of copyright, i.e., "Copyright (c) -2001-2013 Python Software Foundation; All Rights Reserved" are retained in -Python alone or in any derivative version prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python. - -4. PSF is making Python available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. diff --git a/bson/__init__.py b/bson/__init__.py index c8ac12e46e..2c4bd3a8b2 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -22,11 +22,9 @@ None null both bool boolean both int [#int]_ int32 / int64 py -> bson -long int64 py -> bson `bson.int64.Int64` int64 both float number (real) both -string string py -> bson -unicode string both +str string both list array both dict / `SON` object both datetime.datetime [#dt]_ [#dt2]_ date both @@ -36,17 +34,11 @@ `bson.objectid.ObjectId` oid both `bson.dbref.DBRef` dbref both None undefined bson -> py -unicode code bson -> py -`bson.code.Code` code py -> bson -unicode symbol bson -> py -bytes (Python 3) [#bytes]_ binary both +`bson.code.Code` code both +str symbol bson -> py +bytes [#bytes]_ binary both ======================================= ============= =================== -Note that, when using Python 2.x, to save binary data it must be wrapped as -an instance of `bson.binary.Binary`. Otherwise it will be saved as a BSON -string and retrieved as unicode. Users of Python 3.x can use the Python bytes -type. - .. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending on its size. A BSON int32 will always decode to a Python int. A BSON int64 will always decode to a :class:`~bson.int64.Int64`. @@ -58,84 +50,174 @@ objects from ``re.compile()`` are both saved as BSON regular expressions. BSON regular expressions are decoded as :class:`~bson.regex.Regex` instances. -.. 
[#bytes] The bytes type from Python 3.x is encoded as BSON binary with - subtype 0. In Python 3.x it will be decoded back to bytes. In Python 2.x - it will be decoded to an instance of :class:`~bson.binary.Binary` with - subtype 0. +.. [#bytes] The bytes type is encoded as BSON binary with + subtype 0. It will be decoded back to bytes. """ +from __future__ import annotations -import calendar import datetime import itertools -import platform +import os import re import struct import sys import uuid - -from codecs import (utf_8_decode as _utf_8_decode, - utf_8_encode as _utf_8_encode) - -from bson.binary import (Binary, OLD_UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY, - UUIDLegacy) +from codecs import utf_8_decode as _utf_8_decode +from codecs import utf_8_encode as _utf_8_encode +from collections import abc as _abc +from typing import ( + IO, + TYPE_CHECKING, + Any, + BinaryIO, + Callable, + Generator, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.binary import ( + ALL_UUID_SUBTYPES, + CSHARP_LEGACY, + JAVA_LEGACY, + OLD_UUID_SUBTYPE, + STANDARD, + UUID_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code from bson.codec_options import ( - CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class) + DEFAULT_CODEC_OPTIONS, + CodecOptions, + DatetimeConversion, + _raw_document_class, +) +from bson.datetime_ms import ( + EPOCH_AWARE, + EPOCH_NAIVE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) +from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.py3compat import (abc, - b, - PY3, - iteritems, - text_type, - string_type, - reraise) from bson.regex import Regex -from bson.son import SON, RE_TYPE +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc +# Import some modules for type-checking only. 
+if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument + from bson.typings import _DocumentType, _ReadableBuffer try: - from bson import _cbson + from bson import _cbson # type: ignore[attr-defined] + _USE_C = True except ImportError: _USE_C = False - -EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) - - -BSONNUM = b"\x01" # Floating point -BSONSTR = b"\x02" # UTF-8 string -BSONOBJ = b"\x03" # Embedded document -BSONARR = b"\x04" # Array -BSONBIN = b"\x05" # Binary -BSONUND = b"\x06" # Undefined -BSONOID = b"\x07" # ObjectId -BSONBOO = b"\x08" # Boolean -BSONDAT = b"\x09" # UTC Datetime -BSONNUL = b"\x0A" # Null -BSONRGX = b"\x0B" # Regex -BSONREF = b"\x0C" # DBRef -BSONCOD = b"\x0D" # Javascript code -BSONSYM = b"\x0E" # Symbol -BSONCWS = b"\x0F" # Javascript code with scope -BSONINT = b"\x10" # 32bit int -BSONTIM = b"\x11" # Timestamp -BSONLON = b"\x12" # 64bit int -BSONDEC = b"\x13" # Decimal128 -BSONMIN = b"\xFF" # Min key -BSONMAX = b"\x7F" # Max key +__all__ = [ + "ALL_UUID_SUBTYPES", + "CSHARP_LEGACY", + "JAVA_LEGACY", + "OLD_UUID_SUBTYPE", + "STANDARD", + "UUID_SUBTYPE", + "Binary", + "UuidRepresentation", + "Code", + "DEFAULT_CODEC_OPTIONS", + "CodecOptions", + "DBRef", + "Decimal128", + "InvalidBSON", + "InvalidDocument", + "InvalidStringData", + "Int64", + "MaxKey", + "MinKey", + "ObjectId", + "Regex", + "RE_TYPE", + "SON", + "Timestamp", + "utc", + "EPOCH_AWARE", + "EPOCH_NAIVE", + "BSONNUM", + "BSONSTR", + "BSONOBJ", + "BSONARR", + "BSONBIN", + "BSONUND", + "BSONOID", + "BSONBOO", + "BSONDAT", + "BSONNUL", + "BSONRGX", + "BSONREF", + "BSONCOD", + "BSONSYM", + "BSONCWS", + "BSONINT", + "BSONTIM", + "BSONLON", + "BSONDEC", + "BSONMIN", + "BSONMAX", + "get_data_and_view", + "gen_list_name", + "encode", + "decode", + "decode_all", + "decode_iter", + "decode_file_iter", + "is_valid", + "BSON", + "has_c", + "DatetimeConversion", + "DatetimeMS", +] + +BSONNUM = b"\x01" # Floating point +BSONSTR = b"\x02" # UTF-8 string +BSONOBJ = b"\x03" # Embedded document +BSONARR = b"\x04" # Array +BSONBIN = b"\x05" # Binary +BSONUND = b"\x06" # Undefined +BSONOID = b"\x07" # ObjectId +BSONBOO = b"\x08" # Boolean +BSONDAT = b"\x09" # UTC Datetime +BSONNUL = b"\x0A" # Null +BSONRGX = b"\x0B" # Regex +BSONREF = b"\x0C" # DBRef +BSONCOD = b"\x0D" # Javascript code +BSONSYM = b"\x0E" # Symbol +BSONCWS = b"\x0F" # Javascript code with scope +BSONINT = b"\x10" # 32bit int +BSONTIM = b"\x11" # Timestamp +BSONLON = b"\x12" # 64bit int +BSONDEC = b"\x13" # Decimal128 +BSONMIN = b"\xFF" # Min key +BSONMAX = b"\x7F" # Max key _UNPACK_FLOAT_FROM = struct.Struct(" Tuple[Any, memoryview]: + if isinstance(data, (bytes, bytearray)): + return data, memoryview(data) + view = memoryview(data) + return view.tobytes(), view -def _raise_unknown_type(element_type, element_name): +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" - raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % ( - _elt_to_hex(element_type), element_name)) + raise InvalidBSON( + "Detected unknown BSON type {!r} for fieldname '{}'. 
Are " + "you using the latest driver version?".format(chr(element_type).encode(), element_name) + ) -def _get_int(data, view, position, dummy0, dummy1, dummy2): +def _get_int( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data, view, position, opts): - """Decode a BSON 'C' string to python unicode string.""" +def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions[Any]) -> Tuple[str, int]: + """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_float(data, view, position, dummy0, dummy1, dummy2): +def _get_float( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 -def _get_string(data, view, position, obj_end, opts, dummy): - """Decode a BSON string to python unicode string.""" +def _get_string( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any +) -> Tuple[str, int]: + """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 if length < 1 or obj_end - position < length: raise InvalidBSON("invalid string length") end = position + length - 1 - if data[end] != _OBJEND: + if data[end] != 0: raise InvalidBSON("invalid end of string") - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_object_size(data, position, obj_end): +def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: """Validate and return a BSON document's size.""" try: obj_size = _UNPACK_INT_FROM(data, position)[0] except struct.error as exc: - raise InvalidBSON(str(exc)) + raise InvalidBSON(str(exc)) from None end = position + obj_size - 1 - if data[end] != _OBJEND: + if data[end] != 0: raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") @@ -235,32 +294,39 @@ def _get_object_size(data, position, obj_end): return obj_size, end -def _get_object(data, view, position, obj_end, opts, dummy): +def _get_object( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any +) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): - return (opts.document_class(data[position:end + 1], opts), - position + obj_size) + return (opts.document_class(data[position : end + 1], opts), position + obj_size) obj = _elements_to_dict(data, view, position + 4, end, opts) position += obj_size - if "$ref" in obj: - return (DBRef(obj.pop("$ref"), obj.pop("$id", None), - obj.pop("$db", None), obj), position) + # If DBRef validation fails, return a normal doc. 
+ if ( + isinstance(obj.get("$ref"), str) + and "$id" in obj + and isinstance(obj.get("$db"), (str, type(None))) + ): + return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position -def _get_array(data, view, position, obj_end, opts, element_name): +def _get_array( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 - if data[end] != _OBJEND: + if data[end] != 0: raise InvalidBSON("bad eoo") position += 4 end -= 1 - result = [] + result: list[Any] = [] # Avoid doing global and attribute lookups in the loop. append = result.append @@ -271,10 +337,11 @@ def _get_array(data, view, position, obj_end, opts, element_name): while position < end: element_type = data[position] # Just skip the keys. - position = index(b'\x00', position) + 1 + position = index(b"\x00", position) + 1 try: value, position = getter[element_type]( - data, view, position, obj_end, opts, element_name) + data, view, position, obj_end, opts, element_name + ) except KeyError: _raise_unknown_type(element_type, element_name) @@ -286,11 +353,13 @@ def _get_array(data, view, position, obj_end, opts, element_name): append(value) if position != end + 1: - raise InvalidBSON('bad array length') + raise InvalidBSON("bad array length") return result, position + 1 -def _get_binary(data, view, position, obj_end, opts, dummy1): +def _get_binary( + data: Any, _view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) position += 5 @@ -302,71 +371,80 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): length = length2 end = position + length if length < 0 or end > obj_end: - raise InvalidBSON('bad binary object length') - if subtype == 3: - # Java Legacy - uuid_representation = opts.uuid_representation - if uuid_representation == JAVA_LEGACY: - java = data[position:end] - value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) - # C# legacy - elif uuid_representation == CSHARP_LEGACY: - value = uuid.UUID(bytes_le=data[position:end]) - # Python - else: - value = uuid.UUID(bytes=data[position:end]) - return value, end - if subtype == 4: - return uuid.UUID(bytes=data[position:end]), end - # Python3 special case. Decode subtype 0 to 'bytes'. - if PY3 and subtype == 0: + raise InvalidBSON("bad binary object length") + + # Convert UUID subtypes to native UUIDs. + if subtype in ALL_UUID_SUBTYPES: + uuid_rep = opts.uuid_representation + binary_value = Binary(data[position:end], subtype) + if ( + (uuid_rep == UuidRepresentation.UNSPECIFIED) + or (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) + or (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD) + ): + return binary_value, end + return binary_value.as_uuid(uuid_rep), end + + # Decode subtype 0 to 'bytes'. 
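+    # Round-trip sketch: subtype 0 comes back as plain bytes, any other
+    # non-UUID subtype as Binary:
+    #   >>> from bson import decode, encode
+    #   >>> decode(encode({"b": b"\x01\x02"}))["b"]
+    #   b'\x01\x02'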
+ if subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) + return value, end -def _get_oid(data, view, position, dummy0, dummy1, dummy2): +def _get_oid( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end -def _get_boolean(data, view, position, dummy0, dummy1, dummy2): +def _get_boolean( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 boolean_byte = data[position:end] - if boolean_byte == b'\x00': + if boolean_byte == b"\x00": return False, end - elif boolean_byte == b'\x01': + elif boolean_byte == b"\x01": return True, end - raise InvalidBSON('invalid boolean value: %r' % boolean_byte) + raise InvalidBSON("invalid boolean value: %r" % boolean_byte) -def _get_date(data, view, position, dummy0, opts, dummy1): +def _get_date( + data: Any, _view: Any, position: int, dummy0: int, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Union[datetime.datetime, DatetimeMS], int]: """Decode a BSON datetime to python datetime.datetime.""" - return _millis_to_datetime( - _UNPACK_LONG_FROM(data, position)[0], opts), position + 8 + return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data, view, position, obj_end, opts, element_name): +def _get_code( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position -def _get_code_w_scope(data, view, position, obj_end, opts, element_name): +def _get_code_w_scope( + data: Any, view: Any, position: int, _obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] - code, position = _get_string( - data, view, position + 4, code_end, opts, element_name) + code, position = _get_string(data, view, position + 4, code_end, opts, element_name) scope, position = _get_object(data, view, position, code_end, opts, element_name) if position != code_end: - raise InvalidBSON('scope outside of javascript code boundaries') + raise InvalidBSON("scope outside of javascript code boundaries") return Code(code, scope), position -def _get_regex(data, view, position, dummy0, opts, dummy1): +def _get_regex( + data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Regex[Any], int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -374,26 +452,33 @@ def _get_regex(data, view, position, dummy0, opts, dummy1): return bson_re, position -def _get_ref(data, view, position, obj_end, opts, element_name): +def _get_ref( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" - collection, position = _get_string( - data, view, position, obj_end, opts, element_name) + collection, position = _get_string(data, view, position, obj_end, opts, element_name) oid, position = 
_get_oid(data, view, position, obj_end, opts, element_name) return DBRef(collection, oid), position -def _get_timestamp(data, view, position, dummy0, dummy1, dummy2): +def _get_timestamp( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) return Timestamp(timestamp, inc), position + 8 -def _get_int64(data, view, position, dummy0, dummy1, dummy2): +def _get_int64( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 -def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): +def _get_decimal128( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 return Decimal128.from_bid(data[position:end]), end @@ -405,43 +490,67 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER = { - _maybe_ord(BSONNUM): _get_float, - _maybe_ord(BSONSTR): _get_string, - _maybe_ord(BSONOBJ): _get_object, - _maybe_ord(BSONARR): _get_array, - _maybe_ord(BSONBIN): _get_binary, - _maybe_ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # Deprecated undefined - _maybe_ord(BSONOID): _get_oid, - _maybe_ord(BSONBOO): _get_boolean, - _maybe_ord(BSONDAT): _get_date, - _maybe_ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), - _maybe_ord(BSONRGX): _get_regex, - _maybe_ord(BSONREF): _get_ref, # Deprecated DBPointer - _maybe_ord(BSONCOD): _get_code, - _maybe_ord(BSONSYM): _get_string, # Deprecated symbol - _maybe_ord(BSONCWS): _get_code_w_scope, - _maybe_ord(BSONINT): _get_int, - _maybe_ord(BSONTIM): _get_timestamp, - _maybe_ord(BSONLON): _get_int64, - _maybe_ord(BSONDEC): _get_decimal128, - _maybe_ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), - _maybe_ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w)} +_ELEMENT_GETTER: dict[int, Callable[..., Tuple[Any, int]]] = { + ord(BSONNUM): _get_float, + ord(BSONSTR): _get_string, + ord(BSONOBJ): _get_object, + ord(BSONARR): _get_array, + ord(BSONBIN): _get_binary, + ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 # Deprecated undefined + ord(BSONOID): _get_oid, + ord(BSONBOO): _get_boolean, + ord(BSONDAT): _get_date, + ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 + ord(BSONRGX): _get_regex, + ord(BSONREF): _get_ref, # Deprecated DBPointer + ord(BSONCOD): _get_code, + ord(BSONSYM): _get_string, # Deprecated symbol + ord(BSONCWS): _get_code_w_scope, + ord(BSONINT): _get_int, + ord(BSONTIM): _get_timestamp, + ord(BSONLON): _get_int64, + ord(BSONDEC): _get_decimal128, + ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), # noqa: ARG005 + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), # noqa: ARG005 +} if _USE_C: - def _element_to_dict(data, view, position, obj_end, opts): - return _cbson._element_to_dict(data, position, obj_end, opts) + + def _element_to_dict( + data: Any, + view: Any, # noqa: ARG001 + position: int, + obj_end: int, + opts: CodecOptions[Any], + raw_array: bool = False, + ) -> Tuple[str, Any, int]: + return cast( + "Tuple[str, Any, int]", + 
_cbson._element_to_dict(data, position, obj_end, opts, raw_array), + ) + else: - def _element_to_dict(data, view, position, obj_end, opts): + + def _element_to_dict( + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions[Any], + raw_array: bool = False, + ) -> Tuple[str, Any, int]: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) + if raw_array and element_type == ord(BSONARR): + _, end = _get_object_size(data, position, len(data)) + return element_name, view[position : end + 1], end + 1 try: - value, position = _ELEMENT_GETTER[element_type](data, view, position, - obj_end, opts, - element_name) + value, position = _ELEMENT_GETTER[element_type]( + data, view, position, obj_end, opts, element_name + ) except KeyError: _raise_unknown_type(element_type, element_name) @@ -453,38 +562,62 @@ def _element_to_dict(data, view, position, obj_end, opts): return element_name, value, position -def _raw_to_dict(data, position, obj_end, opts, result): - data, view = get_data_and_view(data) - return _elements_to_dict(data, view, position, obj_end, opts, result) +_T = TypeVar("_T", bound=MutableMapping[str, Any]) -def _elements_to_dict(data, view, position, obj_end, opts, result=None): +def _raw_to_dict( + data: Any, + position: int, + obj_end: int, + opts: CodecOptions[RawBSONDocument], + result: _T, + raw_array: bool = False, +) -> _T: + data, view = get_data_and_view(data) + return cast( + _T, _elements_to_dict(data, view, position, obj_end, opts, result, raw_array=raw_array) + ) + + +def _elements_to_dict( + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions[Any], + result: Any = None, + raw_array: bool = False, +) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() end = obj_end - 1 while position < end: - key, value, position = _element_to_dict(data, view, position, obj_end, opts) + key, value, position = _element_to_dict( + data, view, position, obj_end, opts, raw_array=raw_array + ) result[key] = value if position != obj_end: - raise InvalidBSON('bad object or element length') + raise InvalidBSON("bad object or element length") return result -def _bson_to_dict(data, opts): +def _bson_to_dict(data: Any, opts: CodecOptions[_DocumentType]) -> _DocumentType: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: if _raw_document_class(opts.document_class): - return opts.document_class(data, opts) + return opts.document_class(data, opts) # type:ignore[call-arg] _, end = _get_object_size(data, 0, len(data)) - return _elements_to_dict(data, view, 4, end, opts) + return cast("_DocumentType", _elements_to_dict(data, view, 4, end, opts)) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() - reraise(InvalidBSON, exc_value, exc_tb) + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None + + if _USE_C: _bson_to_dict = _cbson._bson_to_dict @@ -494,191 +627,163 @@ def _bson_to_dict(data, opts): _PACK_LENGTH_SUBTYPE = struct.Struct(" Generator[bytes, None, None]: """Generate "keys" for encoded lists in the sequence b"0\x00", b"1\x00", b"2\x00", ... The first 1000 keys are returned from a pre-built cache. All subsequent keys are generated on the fly. 
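 
     Doctest-style sketch of the first keys produced:
 
         >>> g = gen_list_name()
         >>> next(g), next(g), next(g)
         (b'0\x00', b'1\x00', b'2\x00')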
""" - for name in _LIST_NAMES: - yield name + yield from _LIST_NAMES counter = itertools.count(1000) while True: - yield b(str(next(counter))) + b"\x00" + yield (str(next(counter)) + "\x00").encode("utf8") -def _make_c_string_check(string): +def _make_c_string_check(string: Union[str, bytes]) -> bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None else: if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") return _utf_8_encode(string)[0] + b"\x00" -def _make_c_string(string): +def _make_c_string(string: Union[str, bytes]) -> bytes: """Make a 'C' string.""" if isinstance(string, bytes): try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None else: return _utf_8_encode(string)[0] + b"\x00" -if PY3: - def _make_name(string): - """Make a 'C' string suitable for a BSON key.""" - # Keys can only be text in python 3. - if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") - return _utf_8_encode(string)[0] + b"\x00" -else: - # Keys can be unicode or bytes in python 2. - _make_name = _make_c_string_check +def _make_name(string: str) -> bytes: + """Make a 'C' string suitable for a BSON key.""" + if "\x00" in string: + raise InvalidDocument("BSON keys must not contain a NUL character") + return _utf_8_encode(string)[0] + b"\x00" -def _encode_float(name, value, dummy0, dummy1): +def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: """Encode a float.""" return b"\x01" + name + _PACK_FLOAT(value) -if PY3: - def _encode_bytes(name, value, dummy0, dummy1): - """Encode a python bytes.""" - # Python3 special case. Store 'bytes' as BSON binary subtype 0. - return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -else: - def _encode_bytes(name, value, dummy0, dummy1): - """Encode a python str (python 2.x).""" - try: - _utf_8_decode(value, None, True) - except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % (value,)) - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" +def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: + """Encode a python bytes.""" + # Python3 special case. Store 'bytes' as BSON binary subtype 0. 
+ return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name, value, check_keys, opts): +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): - return b'\x03' + name + value.raw - data = b"".join([_element_to_bson(key, val, check_keys, opts) - for key, val in iteritems(value)]) + return b"\x03" + name + cast(bytes, value.raw) + data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in value.items()]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name, value, check_keys, opts): +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 - buf += _name_value_to_bson(b"$ref\x00", - value.collection, check_keys, opts) - buf += _name_value_to_bson(b"$id\x00", - value.id, check_keys, opts) + buf += _name_value_to_bson(b"$ref\x00", value.collection, check_keys, opts) + buf += _name_value_to_bson(b"$id\x00", value.id, check_keys, opts) if value.database is not None: - buf += _name_value_to_bson( - b"$db\x00", value.database, check_keys, opts) - for key, val in iteritems(value._DBRef__kwargs): + buf += _name_value_to_bson(b"$db\x00", value.database, check_keys, opts) + for key, val in value._DBRef__kwargs.items(): buf += _element_to_bson(key, val, check_keys, opts) buf += b"\x00" - buf[begin:begin + 4] = _PACK_INT(len(buf) - begin) + buf[begin : begin + 4] = _PACK_INT(len(buf) - begin) return bytes(buf) -def _encode_list(name, value, check_keys, opts): +def _encode_list( + name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions[Any] +) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() - data = b"".join([_name_value_to_bson(next(lname), item, - check_keys, opts) - for item in value]) + data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_text(name, value, dummy0, dummy1): - """Encode a python unicode (python 2.x) / str (python 3.x).""" - value = _utf_8_encode(value)[0] - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" +def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: + """Encode a python str.""" + bvalue = _utf_8_encode(value)[0] + return b"\x02" + name + _PACK_INT(len(bvalue) + 1) + bvalue + b"\x00" -def _encode_binary(name, value, dummy0, dummy1): +def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.binary.Binary.""" subtype = value.subtype if subtype == 2: - value = _PACK_INT(len(value)) + value + value = _PACK_INT(len(value)) + value # type: ignore return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name, value, dummy, opts): +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions[Any]) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation - # Python Legacy Common Case - if uuid_representation == OLD_UUID_SUBTYPE: - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes - # Java Legacy - elif uuid_representation == JAVA_LEGACY: - from_uuid = value.bytes - data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data - # C# legacy - elif uuid_representation == CSHARP_LEGACY: - # 
Microsoft GUID representation. - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le - # New - return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes - - -def _encode_objectid(name, value, dummy0, dummy1): + binval = Binary.from_uuid(value, uuid_representation=uuid_representation) + return _encode_binary(name, binval, dummy, opts) + + +def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes: """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary -def _encode_bool(name, value, dummy0, dummy1): +def _encode_bool(name: bytes, value: bool, dummy0: Any, dummy1: Any) -> bytes: """Encode a python boolean (True/False).""" return b"\x08" + name + (value and b"\x01" or b"\x00") -def _encode_datetime(name, value, dummy0, dummy1): +def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: Any) -> bytes: """Encode datetime.datetime.""" millis = _datetime_to_millis(value) return b"\x09" + name + _PACK_LONG(millis) -def _encode_none(name, dummy0, dummy1, dummy2): +def _encode_datetime_ms(name: bytes, value: DatetimeMS, dummy0: Any, dummy1: Any) -> bytes: + """Encode datetime.datetime.""" + millis = int(value) + return b"\x09" + name + _PACK_LONG(millis) + + +def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name -def _encode_regex(name, value, dummy0, dummy1): +def _encode_regex(name: bytes, value: Regex[Any], dummy0: Any, dummy1: Any) -> bytes: """Encode a python regex or bson.regex.Regex.""" flags = value.flags - # Python 2 common case - if flags == 0: - return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00" # Python 3 common case - elif flags == re.UNICODE: + if flags == re.UNICODE: return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00" + elif flags == 0: + return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00" else: sflags = b"" if flags & re.IGNORECASE: @@ -697,7 +802,7 @@ def _encode_regex(name, value, dummy0, dummy1): return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name, value, dummy, opts): +def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions[Any]) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -708,7 +813,7 @@ def _encode_code(name, value, dummy, opts): return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope -def _encode_int(name, value, dummy0, dummy1): +def _encode_int(name: bytes, value: int, dummy0: Any, dummy1: Any) -> bytes: """Encode a python int.""" if -2147483648 <= value <= 2147483647: return b"\x10" + name + _PACK_INT(value) @@ -716,33 +821,33 @@ def _encode_int(name, value, dummy0, dummy1): try: return b"\x12" + name + _PACK_LONG(value) except struct.error: - raise OverflowError("BSON can only handle up to 8-byte ints") + raise OverflowError("BSON can only handle up to 8-byte ints") from None -def _encode_timestamp(name, value, dummy0, dummy1): +def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.timestamp.Timestamp.""" return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) -def _encode_long(name, value, dummy0, dummy1): - """Encode a python long (python 2.x)""" +def _encode_long(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: + """Encode a bson.int64.Int64.""" try: return b"\x12" + name + _PACK_LONG(value) except struct.error: - raise OverflowError("BSON can only handle up to 8-byte 
ints") + raise OverflowError("BSON can only handle up to 8-byte ints") from None -def _encode_decimal128(name, value, dummy0, dummy1): +def _encode_decimal128(name: bytes, value: Decimal128, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.decimal128.Decimal128.""" return b"\x13" + name + value.bid -def _encode_minkey(name, dummy0, dummy1, dummy2): +def _encode_minkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.min_key.MinKey.""" return b"\xFF" + name -def _encode_maxkey(name, dummy0, dummy1, dummy2): +def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.max_key.MaxKey.""" return b"\x7F" + name @@ -756,12 +861,12 @@ def _encode_maxkey(name, dummy0, dummy1, dummy2): bool: _encode_bool, bytes: _encode_bytes, datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetime_ms, dict: _encode_mapping, float: _encode_float, int: _encode_int, list: _encode_list, - # unicode in py2, str in py3 - text_type: _encode_text, + str: _encode_text, tuple: _encode_list, type(None): _encode_none, uuid.UUID: _encode_uuid, @@ -776,10 +881,9 @@ def _encode_maxkey(name, dummy0, dummy1, dummy2): RE_TYPE: _encode_regex, SON: _encode_mapping, Timestamp: _encode_timestamp, - UUIDLegacy: _encode_binary, Decimal128: _encode_decimal128, # Special case. This will never be looked up directly. - abc.Mapping: _encode_mapping, + _abc.Mapping: _encode_mapping, } @@ -795,23 +899,34 @@ def _encode_maxkey(name, dummy0, dummy1, dummy2): 255: _encode_minkey, } -if not PY3: - _ENCODERS[long] = _encode_long - _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) -def _name_value_to_bson(name, value, check_keys, opts, - in_custom_call=False, - in_fallback_call=False): +def _name_value_to_bson( + name: bytes, + value: Any, + check_keys: bool, + opts: CodecOptions[Any], + in_custom_call: bool = False, + in_fallback_call: bool = False, +) -> bytes: """Encode a single name, value pair.""" + + was_integer_overflow = False + # First see if the type is already cached. KeyError will only ever # happen once per subtype. try: - return _ENCODERS[type(value)](name, value, check_keys, opts) + return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore except KeyError: pass + except OverflowError: + if not isinstance(value, int): + raise + + # Give the fallback_encoder a chance + was_integer_overflow = True # Second, fall back to trying _type_marker. This has to be done # before the loop below since users could subclass one of our @@ -821,7 +936,7 @@ def _name_value_to_bson(name, value, check_keys, opts, func = _MARKERS[marker] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # Third, check if a type encoder is registered for this type. # Note that subtypes of registered custom types are not auto-encoded. @@ -829,105 +944,85 @@ def _name_value_to_bson(name, value, check_keys, opts, custom_encoder = opts.type_registry._encoder_map.get(type(value)) if custom_encoder is not None: return _name_value_to_bson( - name, custom_encoder(value), check_keys, opts, - in_custom_call=True) + name, custom_encoder(value), check_keys, opts, in_custom_call=True + ) # Fourth, test each base type. This will only happen once for # a subtype of a supported base type. Unlike in the C-extensions, this # is done after trying the custom type encoder because checking for each # subtype is expensive. 
for base in _BUILT_IN_TYPES: - if isinstance(value, base): + if not was_integer_overflow and isinstance(value, base): func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # As a last resort, try using the fallback encoder, if the user has # provided one. fallback_encoder = opts.type_registry._fallback_encoder if not in_fallback_call and fallback_encoder is not None: return _name_value_to_bson( - name, fallback_encoder(value), check_keys, opts, - in_fallback_call=True) + name, fallback_encoder(value), check_keys, opts, in_fallback_call=True + ) - raise InvalidDocument( - "cannot encode object: %r, of type: %r" % (value, type(value))) + if was_integer_overflow: + raise OverflowError("BSON can only handle up to 8-byte ints") + raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}") -def _element_to_bson(key, value, check_keys, opts): +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode a single key, value pair.""" - if not isinstance(key, string_type): - raise InvalidDocument("documents must have only string keys, " - "key was %r" % (key,)) + if not isinstance(key, str): + raise InvalidDocument(f"documents must have only string keys, key was {key!r}") if check_keys: if key.startswith("$"): - raise InvalidDocument("key %r must not start with '$'" % (key,)) + raise InvalidDocument(f"key {key!r} must not start with '$'") if "." in key: - raise InvalidDocument("key %r must not contain '.'" % (key,)) + raise InvalidDocument(f"key {key!r} must not contain '.'") name = _make_name(key) return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc, check_keys, opts, top_level=True): +def _dict_to_bson( + doc: Any, check_keys: bool, opts: CodecOptions[Any], top_level: bool = True +) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): - return doc.raw + return cast(bytes, doc.raw) try: elements = [] if top_level and "_id" in doc: - elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], - check_keys, opts)) - for (key, value) in iteritems(doc): + elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) + for key, value in doc.items(): if not top_level or key != "_id": - elements.append(_element_to_bson(key, value, - check_keys, opts)) + elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: - raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) + raise TypeError(f"encoder expected a mapping type but got: {doc!r}") from None encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" -if _USE_C: - _dict_to_bson = _cbson._dict_to_bson - - -def _millis_to_datetime(millis, opts): - """Convert milliseconds since epoch UTC to datetime.""" - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) // 1000 - micros = diff * 1000 - if opts.tz_aware: - dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, - microseconds=micros) - if opts.tzinfo: - dt = dt.astimezone(opts.tzinfo) - return dt - else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, - microseconds=micros) -def _datetime_to_millis(dtm): - """Convert datetime to milliseconds since epoch UTC.""" - if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() - return int(calendar.timegm(dtm.timetuple()) * 1000 + - dtm.microsecond // 1000) +if _USE_C: + 
_dict_to_bson = _cbson._dict_to_bson -_CODEC_OPTIONS_TYPE_ERROR = TypeError( - "codec_options must be an instance of CodecOptions") +_CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") -def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): +def encode( + document: Mapping[str, Any], + check_keys: bool = False, + codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS, +) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). Raises :class:`TypeError` if `document` is not a mapping type, - or contains keys that are not instances of - :class:`basestring` (:class:`str` in python 3). Raises + or contains keys that are not instances of :class:`str`. Raises :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. @@ -947,7 +1042,19 @@ def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): return _dict_to_bson(document, check_keys, codec_options) -def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): +@overload +def decode(data: _ReadableBuffer, codec_options: None = None) -> dict[str, Any]: + ... + + +@overload +def decode(data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType]) -> _DocumentType: + ... + + +def decode( + data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[dict[str, Any], _DocumentType]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -973,13 +1080,62 @@ def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): .. versionadded:: 3.9 """ - if not isinstance(codec_options, CodecOptions): + opts: CodecOptions[Any] = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _bson_to_dict(data, codec_options) + return cast("Union[dict[str, Any], _DocumentType]", _bson_to_dict(data, opts)) -def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): +def _decode_all(data: _ReadableBuffer, opts: CodecOptions[_DocumentType]) -> list[_DocumentType]: + """Decode a BSON data to multiple documents.""" + data, view = get_data_and_view(data) + data_len = len(data) + docs: list[_DocumentType] = [] + position = 0 + end = data_len - 1 + use_raw = _raw_document_class(opts.document_class) + try: + while position < end: + obj_size = _UNPACK_INT_FROM(data, position)[0] + if data_len - position < obj_size: + raise InvalidBSON("invalid object size") + obj_end = position + obj_size - 1 + if data[obj_end] != 0: + raise InvalidBSON("bad eoo") + if use_raw: + docs.append(opts.document_class(data[position : obj_end + 1], opts)) # type: ignore + else: + docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts)) + position += obj_size + return docs + except InvalidBSON: + raise + except Exception: + # Change exception type to InvalidBSON but preserve traceback. + _, exc_value, exc_tb = sys.exc_info() + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None + + +if _USE_C: + _decode_all = _cbson._decode_all + + +@overload +def decode_all(data: _ReadableBuffer, codec_options: None = None) -> list[dict[str, Any]]: + ... + + +@overload +def decode_all( + data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType] +) -> list[_DocumentType]: + ... + + +def decode_all( + data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[list[dict[str, Any]], list[_DocumentType]]: """Decode BSON data to multiple documents. 
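 
     Round-trip sketch for two concatenated documents:
 
         >>> from bson import decode_all, encode
         >>> decode_all(encode({"a": 1}) + encode({"b": 2}))
         [{'a': 1}, {'b': 2}]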
`data` must be a bytes-like object implementing the buffer protocol that @@ -1001,77 +1157,82 @@ def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. - - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 """ - data, view = get_data_and_view(data) + if codec_options is None: + return _decode_all(data, DEFAULT_CODEC_OPTIONS) + if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - data_len = len(data) - docs = [] - position = 0 - end = data_len - 1 - use_raw = _raw_document_class(codec_options.document_class) - try: - while position < end: - obj_size = _UNPACK_INT_FROM(data, position)[0] - if data_len - position < obj_size: - raise InvalidBSON("invalid object size") - obj_end = position + obj_size - 1 - if data[obj_end] != _OBJEND: - raise InvalidBSON("bad eoo") - if use_raw: - docs.append( - codec_options.document_class( - data[position:obj_end + 1], codec_options)) - else: - docs.append(_elements_to_dict(data, - view, - position + 4, - obj_end, - codec_options)) - position += obj_size - return docs - except InvalidBSON: - raise - except Exception: - # Change exception type to InvalidBSON but preserve traceback. - _, exc_value, exc_tb = sys.exc_info() - reraise(InvalidBSON, exc_value, exc_tb) - - -if _USE_C: - decode_all = _cbson.decode_all + return _decode_all(data, codec_options) -def _decode_selective(rawdoc, fields, codec_options): +def _decode_selective( + rawdoc: Any, fields: Any, codec_options: CodecOptions[_DocumentType] +) -> _DocumentType: if _raw_document_class(codec_options.document_class): # If document_class is RawBSONDocument, use vanilla dictionary for # decoding command response. - doc = {} + doc: _DocumentType = {} # type:ignore[assignment] else: # Else, use the specified document_class. doc = codec_options.document_class() - for key, value in iteritems(rawdoc): + for key, value in rawdoc.items(): if key in fields: if fields[key] == 1: - doc[key] = _bson_to_dict(rawdoc.raw, codec_options)[key] + doc[key] = _bson_to_dict(rawdoc.raw, codec_options)[key] # type:ignore[index] else: - doc[key] = _decode_selective(value, fields[key], codec_options) + doc[key] = _decode_selective( # type:ignore[index] + value, fields[key], codec_options + ) else: - doc[key] = value + doc[key] = value # type:ignore[index] return doc -def _decode_all_selective(data, codec_options, fields): +def _array_of_documents_to_buffer(view: memoryview) -> bytes: + # Extract the raw bytes of each document. + position = 0 + _, end = _get_object_size(view, position, len(view)) + position += 4 + buffers: list[memoryview] = [] + append = buffers.append + while position < end - 1: + # Just skip the keys. 
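+        # (Array elements are keyed by their index rendered as a cstring,
+        # b"0", b"1", ...; this scan also steps over the leading \x03 type
+        # byte, which is never NUL.)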
+ while view[position] != 0: + position += 1 + position += 1 + obj_size, _ = _get_object_size(view, position, end) + append(view[position : position + obj_size]) + position += obj_size + if position != end: + raise InvalidBSON("bad object or element length") + return b"".join(buffers) + + +if _USE_C: + _array_of_documents_to_buffer = _cbson._array_of_documents_to_buffer + + +def _convert_raw_document_lists_to_streams(document: Any) -> None: + """Convert raw array of documents to a stream of BSON documents.""" + cursor = document.get("cursor") + if not cursor: + return + for key in ("firstBatch", "nextBatch"): + batch = cursor.get(key) + if not batch: + continue + data = _array_of_documents_to_buffer(batch) + if data: + cursor[key] = [data] + else: + cursor[key] = [] + + +def _decode_all_selective( + data: Any, codec_options: CodecOptions[_DocumentType], fields: Any +) -> list[_DocumentType]: """Decode BSON data to a single document while using user-provided custom decoding logic. @@ -1102,13 +1263,33 @@ def _decode_all_selective(data, codec_options, fields): # Decode documents for internal use. from bson.raw_bson import RawBSONDocument - internal_codec_options = codec_options.with_options( - document_class=RawBSONDocument, type_registry=None) + + internal_codec_options: CodecOptions[RawBSONDocument] = codec_options.with_options( + document_class=RawBSONDocument, type_registry=None + ) _doc = _bson_to_dict(data, internal_codec_options) - return [_decode_selective(_doc, fields, codec_options,)] + return [ + _decode_selective( + _doc, + fields, + codec_options, + ) + ] -def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): +@overload +def decode_iter(data: bytes, codec_options: None = None) -> Iterator[dict[str, Any]]: + ... + + +@overload +def decode_iter(data: bytes, codec_options: CodecOptions[_DocumentType]) -> Iterator[_DocumentType]: + ... + + +def decode_iter( + data: bytes, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1128,20 +1309,38 @@ def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): .. versionadded:: 2.8 """ - if not isinstance(codec_options, CodecOptions): + opts = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 end = len(data) - 1 while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] - elements = data[position:position + obj_size] + elements = data[position : position + obj_size] position += obj_size - yield _bson_to_dict(elements, codec_options) + yield _bson_to_dict(elements, opts) # type:ignore[misc, type-var] + + +@overload +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], codec_options: None = None +) -> Iterator[dict[str, Any]]: + ... + +@overload +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], codec_options: CodecOptions[_DocumentType] +) -> Iterator[_DocumentType]: + ... -def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): + +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], + codec_options: Optional[CodecOptions[_DocumentType]] = None, +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: """Decode bson data from a file to multiple documents as a generator. 
 Works similarly to the decode_all function, but reads from the file object
@@ -1158,23 +1357,24 @@ def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS):
 
     .. versionadded:: 2.8
     """
+    opts = codec_options or DEFAULT_CODEC_OPTIONS
     while True:
         # Read size of next object.
-        size_data = file_obj.read(4)
+        size_data: Any = file_obj.read(4)
         if not size_data:
-            break  # Finished with file normaly.
+            break  # Finished with file normally.
         elif len(size_data) != 4:
             raise InvalidBSON("cut off in middle of objsize")
         obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4
         elements = size_data + file_obj.read(max(0, obj_size))
-        yield _bson_to_dict(elements, codec_options)
+        yield _bson_to_dict(elements, opts)  # type:ignore[type-var, arg-type, misc]
 
 
-def is_valid(bson):
+def is_valid(bson: bytes) -> bool:
     """Check that the given string represents valid :class:`BSON` data.
 
     Raises :class:`TypeError` if `bson` is not an instance of
-    :class:`str` (:class:`bytes` in python 3). Returns ``True``
+    :class:`bytes`. Returns ``True``
     if `bson` is valid :class:`BSON`, ``False`` otherwise.
 
     :Parameters:
@@ -1199,17 +1399,20 @@ class BSON(bytes):
     """
 
     @classmethod
-    def encode(cls, document, check_keys=False,
-               codec_options=DEFAULT_CODEC_OPTIONS):
+    def encode(
+        cls: Type[BSON],
+        document: Mapping[str, Any],
+        check_keys: bool = False,
+        codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS,
+    ) -> BSON:
         """Encode a document to a new :class:`BSON` instance.
 
         A document can be any mapping type (like :class:`dict`).
 
         Raises :class:`TypeError` if `document` is not a mapping type,
         or contains keys that are not instances of
-        :class:`basestring` (:class:`str` in python 3). Raises
-        :class:`~bson.errors.InvalidDocument` if `document` cannot be
-        converted to :class:`BSON`.
+        :class:`str`. Raises :class:`~bson.errors.InvalidDocument`
+        if `document` cannot be converted to :class:`BSON`.
 
         :Parameters:
           - `document`: mapping type representing a document
@@ -1224,7 +1427,9 @@ def encode(cls, document, check_keys=False,
         """
         return cls(encode(document, check_keys, codec_options))
 
-    def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
+    def decode(  # type:ignore[override]
+        self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS
+    ) -> dict[str, Any]:
         """Decode this BSON data.
 
         By default, returns a BSON document represented as a Python
@@ -1254,20 +1459,23 @@ def decode(self, codec_options=DEFAULT_CODEC_OPTIONS):
            Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
            `codec_options`.
-
-        .. versionchanged:: 2.7
-           Added `compile_re` option. If set to False, PyMongo represented BSON
-           regular expressions as :class:`~bson.regex.Regex` objects instead of
-           attempting to compile BSON regular expressions as Python native
-           regular expressions, thus preventing errors for some incompatible
-           patterns, see `PYTHON-500`_.
-
-        .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
         """
         return decode(self, codec_options)
 
 
-def has_c():
-    """Is the C extension installed?
-    """
+def has_c() -> bool:
+    """Is the C extension installed?"""
     return _USE_C
+
+
+def _after_fork() -> None:
+    """Release the ObjectId lock in the child process after a fork."""
+    if ObjectId._inc_lock.locked():
+        ObjectId._inc_lock.release()
+
+
+if hasattr(os, "register_at_fork"):
+    # This runs in the child process, on the thread that called fork.
+    # Forking from inside a critical region on that thread would break this,
+    # but we never call fork directly from a critical region.
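+    # Sketch of the hazard this avoids (assumed scenario): thread A holds
+    # ObjectId._inc_lock while thread B forks; the child inherits a locked
+    # lock with no owning thread, so the next ObjectId() in the child
+    # would deadlock unless the lock is released here.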
+ os.register_at_fork(after_in_child=_after_fork) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 1fbb48cc96..da86cd8133 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -26,7 +26,6 @@ #include "buffer.h" #include "time64.h" -#include "encoding_helpers.h" #define _CBSON_MODULE #include "_cbsonmodule.h" @@ -53,20 +52,37 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; - PyObject* CodecOptions; + PyObject* DatetimeMS; + PyObject* _min_datetime_ms; + PyObject* _max_datetime_ms; + PyObject* _type_marker_str; + PyObject* _flags_str; + PyObject* _pattern_str; + PyObject* _encoder_map_str; + PyObject* _decoder_map_str; + PyObject* _fallback_encoder_str; + PyObject* _raw_str; + PyObject* _subtype_str; + PyObject* _binary_str; + PyObject* _scope_str; + PyObject* _inc_str; + PyObject* _time_str; + PyObject* _bid_str; + PyObject* _replace_str; + PyObject* _astimezone_str; + PyObject* _id_str; + PyObject* _dollar_ref_str; + PyObject* _dollar_id_str; + PyObject* _dollar_db_str; + PyObject* _tzinfo_str; + PyObject* _as_doc_str; + PyObject* _utcoffset_str; + PyObject* _from_uuid_str; + PyObject* _as_uuid_str; + PyObject* _from_bid_str; }; -/* The Py_TYPE macro was introduced in CPython 2.6 */ -#ifndef Py_TYPE -#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) -#endif - -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif /* Maximum number of regex flags */ #define FLAGS_SIZE 7 @@ -78,11 +94,111 @@ static struct module_state _state; #define STANDARD 4 #define JAVA_LEGACY 5 #define CSHARP_LEGACY 6 +#define UNSPECIFIED 0 #define BSON_MAX_SIZE 2147483647 /* The smallest possible BSON document, i.e. "{}" */ #define BSON_MIN_SIZE 5 +/* Datetime codec options */ +#define DATETIME 1 +#define DATETIME_CLAMP 2 +#define DATETIME_MS 3 +#define DATETIME_AUTO 4 + +/* Converts integer to its string representation in decimal notation. 
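+ * A 64-bit signed value needs at most 19 digits plus a sign and a NUL
+ * terminator, which is why the function requires size >= 21.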
*/ +extern int cbson_long_long_to_str(long long num, char* str, size_t size) { + // Buffer should fit 64-bit signed integer + if (size < 21) { + PyErr_Format( + PyExc_RuntimeError, + "Buffer too small to hold long long: %d < 21", size); + return -1; + } + int index = 0; + int sign = 1; + // Convert to unsigned to handle -LLONG_MIN overflow + unsigned long long absNum; + // Handle the case of 0 + if (num == 0) { + str[index++] = '0'; + str[index] = '\0'; + return 0; + } + // Handle negative numbers + if (num < 0) { + sign = -1; + absNum = 0ULL - (unsigned long long)num; + } else { + absNum = (unsigned long long)num; + } + // Convert the number to string + unsigned long long digit; + while (absNum > 0) { + digit = absNum % 10ULL; + str[index++] = (char)digit + '0'; // Convert digit to character + absNum /= 10; + } + // Add minus sign if negative + if (sign == -1) { + str[index++] = '-'; + } + str[index] = '\0'; // Null terminator + // Reverse the string + int start = 0; + int end = index - 1; + while (start < end) { + char temp = str[start]; + str[start++] = str[end]; + str[end--] = temp; + } + return 0; +} + +static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { + // Test extreme values + Py_ssize_t maxNum = PY_SSIZE_T_MAX; + Py_ssize_t minNum = PY_SSIZE_T_MIN; + Py_ssize_t num; + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + int res = LL2STR(str_1, (long long)minNum); + if (res == -1) { + return NULL; + } + INT2STRING(str_2, (long long)minNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + LL2STR(str_1, (long long)maxNum); + INT2STRING(str_2, (long long)maxNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + + // Test common values + for (num = 0; num < 10000; num++) { + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + LL2STR(str_1, (long long)num); + INT2STRING(str_2, (long long)num); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + } + + return args; +} + /* Get an error class from the bson.errors module. * * Returns a new ref */ @@ -127,7 +243,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. 
*/ -static int write_raw_doc(buffer_t buffer, PyObject* raw); +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); /* Date stuff */ static PyObject* datetime_from_millis(long long millis) { @@ -159,19 +275,51 @@ static PyObject* datetime_from_millis(long long millis) { * micros = diff * 1000 111000 * Resulting in datetime(1, 1, 1, 1, 1, 1, 111000) -- the expected result */ + PyObject* datetime; int diff = (int)(((millis % 1000) + 1000) % 1000); int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; struct TM timeinfo; - gmtime64_r(&seconds, &timeinfo); - - return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, - timeinfo.tm_mon + 1, - timeinfo.tm_mday, - timeinfo.tm_hour, - timeinfo.tm_min, - timeinfo.tm_sec, - microseconds); + cbson_gmtime64_r(&seconds, &timeinfo); + + datetime = PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, + timeinfo.tm_mon + 1, + timeinfo.tm_mday, + timeinfo.tm_hour, + timeinfo.tm_min, + timeinfo.tm_sec, + microseconds); + if(!datetime) { + PyObject *etype, *evalue, *etrace; + + /* + * Calling _error clears the error state, so fetch it first. + */ + PyErr_Fetch(&etype, &evalue, &etrace); + + /* Only add addition error message on ValueError exceptions. */ + if (PyErr_GivenExceptionMatches(etype, PyExc_ValueError)) { + if (evalue) { + PyObject* err_msg = PyObject_Str(evalue); + if (err_msg) { + PyObject* appendage = PyUnicode_FromString(" (Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO) or MongoClient(datetime_conversion='DATETIME_AUTO')). See: https://pymongo.readthedocs.io/en/stable/examples/datetimes.html#handling-out-of-range-datetimes"); + if (appendage) { + PyObject* msg = PyUnicode_Concat(err_msg, appendage); + if (msg) { + Py_DECREF(evalue); + evalue = msg; + } + } + Py_XDECREF(appendage); + } + Py_XDECREF(err_msg); + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } + /* Steals references to args. */ + PyErr_Restore(etype, evalue, etrace); + } + return datetime; } static long long millis_from_datetime(PyObject* datetime) { @@ -185,15 +333,49 @@ static long long millis_from_datetime(PyObject* datetime) { timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - millis = timegm64(&timeinfo) * 1000; + millis = cbson_timegm64(&timeinfo) * 1000; millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; return millis; } +/* Extended-range datetime, returns a DatetimeMS object with millis */ +static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ + // Allocate a new DatetimeMS object. + struct module_state *state = GETSTATE(self); + + PyObject* dt; + PyObject* ll_millis; + + if (!(ll_millis = PyLong_FromLongLong(millis))){ + return NULL; + } + dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); + Py_DECREF(ll_millis); + return dt; +} + +/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. */ +static int millis_from_datetime_ms(PyObject* dt, long long* out){ + PyObject* ll_millis; + long long millis; + + if (!(ll_millis = PyNumber_Long(dt))){ + return 0; + } + millis = PyLong_AsLongLong(ll_millis); + Py_DECREF(ll_millis); + if (millis == -1 && PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } + *out = millis; + return 1; +} + /* Just make this compatible w/ the old API. 
*/ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (buffer_write(buffer, data, size)) { - PyErr_NoMemory(); + if (pymongo_buffer_write(buffer, data, size)) { return 0; } return 1; @@ -218,7 +400,7 @@ void buffer_write_int32_at_position(buffer_t buffer, int position, int32_t data) { uint32_t data_le = BSON_UINT32_TO_LE(data); - memcpy(buffer_get_buffer(buffer) + position, &data_le, 4); + memcpy(pymongo_buffer_get_buffer(buffer) + position, &data_le, 4); } static int write_unicode(buffer_t buffer, PyObject* py_string) { @@ -228,19 +410,11 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { if (!encoded) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AS_STRING(encoded); -#else - data = PyString_AS_STRING(encoded); -#endif if (!data) goto unicodefail; -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) -#endif goto unicodefail; if (!buffer_write_int32(buffer, (int32_t)size)) @@ -261,23 +435,15 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { static int write_string(buffer_t buffer, PyObject* py_string) { int size; const char* data; -#if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(py_string)){ return write_unicode(buffer, py_string); } data = PyBytes_AsString(py_string); -#else - data = PyString_AsString(py_string); -#endif if (!data) { return 0; } -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_Size(py_string), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_Size(py_string), 1)) == -1) -#endif return 0; if (!buffer_write_int32(buffer, (int32_t)size)) { @@ -358,6 +524,35 @@ static int _load_python_objects(PyObject* module) { PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); + /* Cache commonly used attribute names to improve performance. 
*/ + if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && + (state->_flags_str = PyUnicode_FromString("flags")) && + (state->_pattern_str = PyUnicode_FromString("pattern")) && + (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && + (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && + (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && + (state->_raw_str = PyUnicode_FromString("raw")) && + (state->_subtype_str = PyUnicode_FromString("subtype")) && + (state->_binary_str = PyUnicode_FromString("binary")) && + (state->_scope_str = PyUnicode_FromString("scope")) && + (state->_inc_str = PyUnicode_FromString("inc")) && + (state->_time_str = PyUnicode_FromString("time")) && + (state->_bid_str = PyUnicode_FromString("bid")) && + (state->_replace_str = PyUnicode_FromString("replace")) && + (state->_astimezone_str = PyUnicode_FromString("astimezone")) && + (state->_id_str = PyUnicode_FromString("_id")) && + (state->_dollar_ref_str = PyUnicode_FromString("$ref")) && + (state->_dollar_id_str = PyUnicode_FromString("$id")) && + (state->_dollar_db_str = PyUnicode_FromString("$db")) && + (state->_tzinfo_str = PyUnicode_FromString("tzinfo")) && + (state->_as_doc_str = PyUnicode_FromString("as_doc")) && + (state->_utcoffset_str = PyUnicode_FromString("utcoffset")) && + (state->_from_uuid_str = PyUnicode_FromString("from_uuid")) && + (state->_as_uuid_str = PyUnicode_FromString("as_uuid")) && + (state->_from_bid_str = PyUnicode_FromString("from_bid")))) { + return 1; + } + if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || _load_object(&state->ObjectId, "bson.objectid", "ObjectId") || @@ -370,20 +565,14 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || -#if PY_MAJOR_VERSION >= 3 _load_object(&state->Mapping, "collections.abc", "Mapping") || -#else - _load_object(&state->Mapping, "collections", "Mapping") || -#endif - _load_object(&state->CodecOptions, "bson.codec_options", "CodecOptions")) { + _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || + _load_object(&state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms") || + _load_object(&state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms")) { return 1; } /* Reload our REType hack too. */ -#if PY_MAJOR_VERSION >= 3 empty_string = PyBytes_FromString(""); -#else - empty_string = PyString_FromString(""); -#endif if (empty_string == NULL) { state->REType = NULL; return 1; @@ -414,12 +603,12 @@ static int _load_python_objects(PyObject* module) { * * Return the type marker, 0 if there is no marker, or -1 on failure. */ -static long _type_marker(PyObject* object) { +static long _type_marker(PyObject* object, PyObject* _type_marker_str) { PyObject* type_marker = NULL; long type = 0; - if (PyObject_HasAttrString(object, "_type_marker")) { - type_marker = PyObject_GetAttrString(object, "_type_marker"); + if (PyObject_HasAttr(object, _type_marker_str)) { + type_marker = PyObject_GetAttr(object, _type_marker_str); if (type_marker == NULL) { return -1; } @@ -433,21 +622,9 @@ static long _type_marker(PyObject* object) { * or method. In some cases "value" could be a subtype of something * we know how to serialize. Make a best effort to encode these types. 
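/*
 * Minimal embedded-Python sketch (assumes a libpython to link against) of
 * why the module state caches attribute names as PyObject*s: a name built
 * once with PyUnicode_FromString lets hot paths call PyObject_GetAttr,
 * whereas PyObject_GetAttrString would rebuild the unicode key on every
 * lookup. The module and attribute chosen here are arbitrary.
 */
#include <Python.h>

int main(void) {
    Py_Initialize();
    PyObject *name = PyUnicode_FromString("__name__");  /* interned once */
    PyObject *mod = PyImport_ImportModule("math");
    if (name && mod) {
        for (int i = 0; i < 100000; i++) {
            /* No per-call string construction on this path. */
            PyObject *val = PyObject_GetAttr(mod, name);
            Py_XDECREF(val);
        }
    }
    Py_XDECREF(mod);
    Py_XDECREF(name);
    return Py_FinalizeEx();
}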
*/ -#if PY_MAJOR_VERSION >= 3 if (type_marker && PyLong_CheckExact(type_marker)) { type = PyLong_AsLong(type_marker); -#else - if (type_marker && PyInt_CheckExact(type_marker)) { - type = PyInt_AsLong(type_marker); -#endif Py_DECREF(type_marker); - /* - * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value - * so we call PyErr_Occurred to differentiate. - */ - if (type == -1 && PyErr_Occurred()) { - return -1; - } } else { Py_XDECREF(type_marker); } @@ -460,25 +637,25 @@ static long _type_marker(PyObject* object) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; registry->registry_obj = NULL; - registry->encoder_map = PyObject_GetAttrString(registry_obj, "_encoder_map"); + registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); if (registry->encoder_map == NULL) { goto fail; } registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); - registry->decoder_map = PyObject_GetAttrString(registry_obj, "_decoder_map"); + registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); if (registry->decoder_map == NULL) { goto fail; } registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); - registry->fallback_encoder = PyObject_GetAttrString(registry_obj, "_fallback_encoder"); + registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); if (registry->fallback_encoder == NULL) { goto fail; } @@ -495,35 +672,37 @@ int convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { return 0; } -/* Fill out a codec_options_t* from a CodecOptions object. Use with the "O&" - * format spec in PyArg_ParseTuple. +/* Fill out a codec_options_t* from a CodecOptions object. * * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_codec_options(PyObject* options_obj, void* p) { - codec_options_t* options = (codec_options_t*)p; +int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; + struct module_state *state = GETSTATE(self); long type_marker; options->unicode_decode_error_handler = NULL; - if (!PyArg_ParseTuple(options_obj, "ObbzOO", + if (!PyArg_ParseTuple(options_obj, "ObbzOOb", &options->document_class, &options->tz_aware, &options->uuid_rep, &options->unicode_decode_error_handler, &options->tzinfo, - &type_registry_obj)) + &type_registry_obj, + &options->datetime_conversion)) { return 0; + } - type_marker = _type_marker(options->document_class); + type_marker = _type_marker(options->document_class, + state->_type_marker_str); if (type_marker < 0) { return 0; } - if (!convert_type_registry(type_registry_obj, - &options->type_registry)) { + if (!cbson_convert_type_registry(type_registry_obj, + &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { return 0; } @@ -537,26 +716,6 @@ int convert_codec_options(PyObject* options_obj, void* p) { return 1; } -/* Fill out a codec_options_t* with default options. - * - * Return 1 on success. - * Return 0 on failure. 
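/*
 * Hypothetical, self-contained illustration of the 7-field tuple that the
 * reworked convert_codec_options above unpacks with the "ObbzOOb" format
 * (the trailing "b" is the newly added datetime_conversion flag). The
 * concrete values below are placeholders, not real CodecOptions contents;
 * link against libpython to run.
 */
#include <Python.h>
#include <stdio.h>

int main(void) {
    Py_Initialize();
    PyObject *opts = Py_BuildValue("(ObbzOOb)",
                                   (PyObject *)&PyDict_Type, /* document_class */
                                   1,                        /* tz_aware */
                                   4,                        /* uuid_rep */
                                   "strict",                 /* unicode handler */
                                   Py_None,                  /* tzinfo */
                                   Py_None,                  /* type_registry stand-in */
                                   0);                       /* datetime_conversion */
    PyObject *doc_class, *tzinfo, *registry;
    unsigned char tz_aware, uuid_rep, dt_conv;
    const char *handler;
    if (opts && PyArg_ParseTuple(opts, "ObbzOOb", &doc_class, &tz_aware,
                                 &uuid_rep, &handler, &tzinfo, &registry,
                                 &dt_conv)) {
        printf("tz_aware=%u uuid_rep=%u handler=%s dt_conv=%u\n",
               tz_aware, uuid_rep, handler, dt_conv);
    }
    Py_XDECREF(opts);
    return Py_FinalizeEx();
}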
- */ -int default_codec_options(struct module_state* state, codec_options_t* options) { - PyObject* options_obj = NULL; - PyObject* codec_options_func = _get_object( - state->CodecOptions, "bson.codec_options", "CodecOptions"); - if (codec_options_func == NULL) { - return 0; - } - options_obj = PyObject_CallFunctionObjArgs(codec_options_func, NULL); - Py_DECREF(codec_options_func); - if (options_obj == NULL) { - return 0; - } - return convert_codec_options(options_obj, options); -} - void destroy_codec_options(codec_options_t* options) { Py_CLEAR(options->document_class); Py_CLEAR(options->tzinfo); @@ -585,20 +744,14 @@ static int write_element_to_buffer(PyObject* self, buffer_t buffer, } static void -_fix_java(const char* in, char* out) { - int i, j; - for (i = 0, j = 7; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; - } - for (i = 8, j = 15; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; +_set_cannot_encode(PyObject* value) { + if (PyLong_Check(value)) { + if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) { + return PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + } } -} -static void -_set_cannot_encode(PyObject* value) { PyObject* type = NULL; PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument == NULL) { @@ -609,40 +762,8 @@ _set_cannot_encode(PyObject* value) { if (type == NULL) { goto error; } -#if PY_MAJOR_VERSION >= 3 PyErr_Format(InvalidDocument, "cannot encode object: %R, of type: %R", value, type); -#else - else { - PyObject* value_repr = NULL; - PyObject* type_repr = NULL; - char* value_str = NULL; - char* type_str = NULL; - - value_repr = PyObject_Repr(value); - if (value_repr == NULL) { - goto py2error; - } - value_str = PyString_AsString(value_repr); - if (value_str == NULL) { - goto py2error; - } - type_repr = PyObject_Repr(type); - if (type_repr == NULL) { - goto py2error; - } - type_str = PyString_AsString(type_repr); - if (type_str == NULL) { - goto py2error; - } - - PyErr_Format(InvalidDocument, "cannot encode object: %s, of type: %s", - value_str, type_str); -py2error: - Py_XDECREF(type_repr); - Py_XDECREF(value_repr); - } -#endif error: Py_XDECREF(type); Py_XDECREF(InvalidDocument); @@ -654,36 +775,32 @@ _set_cannot_encode(PyObject* value) { * Sets exception and returns 0 on failure. */ static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value) { + buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { PyObject* py_flags; PyObject* py_pattern; PyObject* encoded_pattern; + PyObject* decoded_pattern; long int_flags; char flags[FLAGS_SIZE]; char check_utf8 = 0; const char* pattern_data; int pattern_length, flags_length; - result_t status; /* * Both the builtin re type and our Regex class have attributes * "flags" and "pattern". 
*/ - py_flags = PyObject_GetAttrString(value, "flags"); + py_flags = PyObject_GetAttr(value, _flags_str); if (!py_flags) { return 0; } -#if PY_MAJOR_VERSION >= 3 int_flags = PyLong_AsLong(py_flags); -#else - int_flags = PyInt_AsLong(py_flags); -#endif Py_DECREF(py_flags); if (int_flags == -1 && PyErr_Occurred()) { return 0; } - py_pattern = PyObject_GetAttrString(value, "pattern"); + py_pattern = PyObject_GetAttr(value, _pattern_str); if (!py_pattern) { return 0; } @@ -699,7 +816,6 @@ static int _write_regex_to_buffer( check_utf8 = 1; } -#if PY_MAJOR_VERSION >= 3 if (!(pattern_data = PyBytes_AsString(encoded_pattern))) { Py_DECREF(encoded_pattern); return 0; @@ -708,28 +824,8 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } -#else - if (!(pattern_data = PyString_AsString(encoded_pattern))) { - Py_DECREF(encoded_pattern); - return 0; - } - if ((pattern_length = _downcast_and_check(PyString_Size(encoded_pattern), 0)) == -1) { - Py_DECREF(encoded_pattern); - return 0; - } -#endif - status = check_string((const unsigned char*)pattern_data, - pattern_length, check_utf8, 1); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded_pattern); - return 0; - } else if (status == HAS_NULL) { + + if (strlen(pattern_data) != (size_t) pattern_length){ PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyErr_SetString(InvalidDocument, @@ -740,6 +836,22 @@ static int _write_regex_to_buffer( return 0; } + if (check_utf8) { + decoded_pattern = PyUnicode_DecodeUTF8(pattern_data, (Py_ssize_t) pattern_length, NULL); + if (decoded_pattern == NULL) { + PyErr_Clear(); + PyObject* InvalidStringData = _error("InvalidStringData"); + if (InvalidStringData) { + PyErr_SetString(InvalidStringData, + "regex patterns must be valid UTF-8"); + Py_DECREF(InvalidStringData); + } + Py_DECREF(encoded_pattern); + return 0; + } + Py_DECREF(decoded_pattern); + } + if (!buffer_write_bytes(buffer, pattern_data, pattern_length + 1)) { Py_DECREF(encoded_pattern); return 0; @@ -770,7 +882,7 @@ static int _write_regex_to_buffer( if (!buffer_write_bytes(buffer, flags, flags_length)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x0B; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0B; return 1; } @@ -789,12 +901,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* new_value = NULL; int retval; PyObject* uuid_type; + int is_list; /* * Don't use PyObject_IsInstance for our custom types. It causes * problems with python sub interpreters. Our custom types should * have a _type_marker attribute, which we can switch on instead. 
*/ - long type = _type_marker(value); + long type = _type_marker(value, state->_type_marker_str); if (type < 0) { return 0; } @@ -808,25 +921,17 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, const char* data; int size; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; + subtype_object = PyObject_GetAttr(value, state->_subtype_str); if (!subtype_object) { return 0; } -#if PY_MAJOR_VERSION >= 3 subtype = (char)PyLong_AsLong(subtype_object); -#else - subtype = (char)PyInt_AsLong(subtype_object); -#endif if (subtype == -1) { Py_DECREF(subtype_object); return 0; } -#if PY_MAJOR_VERSION >= 3 size = _downcast_and_check(PyBytes_Size(value), 0); -#else - size = _downcast_and_check(PyString_Size(value), 0); -#endif if (size == -1) { Py_DECREF(subtype_object); return 0; @@ -834,11 +939,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, Py_DECREF(subtype_object); if (subtype == 2) { -#if PY_MAJOR_VERSION >= 3 int other_size = _downcast_and_check(PyBytes_Size(value), 4); -#else - int other_size = _downcast_and_check(PyString_Size(value), 4); -#endif if (other_size == -1) return 0; if (!buffer_write_int32(buffer, other_size)) { @@ -856,11 +957,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(value); -#else - data = PyString_AsString(value); -#endif if (!data) { return 0; } @@ -873,15 +970,11 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* ObjectId */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "binary"); + PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); if (!pystring) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(pystring); -#else - data = PyString_AsString(pystring); -#endif if (!data) { Py_DECREF(pystring); return 0; @@ -891,13 +984,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x07; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; return 1; } case 11: { /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } case 13: { @@ -906,24 +999,23 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length_location, length; - PyObject* scope = PyObject_GetAttrString(value, "scope"); + PyObject* scope = PyObject_GetAttr(value, state->_scope_str); if (!scope) { return 0; } if (scope == Py_None) { Py_DECREF(scope); - *(buffer_get_buffer(buffer) + type_byte) = 0x0D; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0D; return write_string(buffer, value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x0F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0F; - start_position = buffer_get_position(buffer); + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); Py_DECREF(scope); return 0; } @@ -939,7 +1031,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } Py_DECREF(scope); - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; 
buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; @@ -950,7 +1042,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* obj; unsigned long i; - obj = PyObject_GetAttrString(value, "inc"); + obj = PyObject_GetAttr(value, state->_inc_str); if (!obj) { return 0; } @@ -963,7 +1055,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - obj = PyObject_GetAttrString(value, "time"); + obj = PyObject_GetAttr(value, state->_time_str); if (!obj) { return 0; } @@ -976,7 +1068,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x11; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x11; return 1; } case 18: @@ -991,22 +1083,18 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!buffer_write_int64(buffer, (int64_t)ll)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return 1; } case 19: { /* Decimal128 */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "bid"); + PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); if (!pystring) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(pystring); -#else - data = PyString_AsString(pystring); -#endif if (!data) { Py_DECREF(pystring); return 0; @@ -1016,13 +1104,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x13; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x13; return 1; } case 100: { /* DBRef */ - PyObject* as_doc = PyObject_CallMethod(value, "as_doc", NULL); + PyObject* as_doc = PyObject_CallMethodObjArgs(value, state->_as_doc_str, NULL); if (!as_doc) { return 0; } @@ -1031,101 +1119,82 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(as_doc); - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 101: { /* RawBSONDocument */ - if (!write_raw_doc(buffer, value)) { + if (!write_raw_doc(buffer, value, state->_raw_str)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 255: { /* MinKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0xFF; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0xFF; return 1; } case 127: { /* MaxKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0x7F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x7F; return 1; } } - /* No _type_marker attibute or not one of our types. */ + /* No _type_marker attribute or not one of our types. */ if (PyBool_Check(value)) { const char c = (value == Py_True) ? 0x01 : 0x00; - *(buffer_get_buffer(buffer) + type_byte) = 0x08; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x08; return buffer_write_bytes(buffer, &c, 1); } -#if PY_MAJOR_VERSION >= 3 else if (PyLong_Check(value)) { - const long long_value = PyLong_AsLong(value); -#else - else if (PyInt_Check(value)) { - const long long_value = PyInt_AsLong(value); -#endif - - const int int_value = (int)long_value; - if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ - long long long_long_value; + const long long long_long_value = PyLong_AsLongLong(value); + if (long_long_value == -1 && PyErr_Occurred()) { + /* Ignore error and give the fallback_encoder a chance. 
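/*
 * Pure-C sketch of the width decision made in the integer branch nearby:
 * the rewritten encoder stores a Python int as BSON type 0x10 (int32) when
 * it fits in 32 bits and as 0x12 (int64) otherwise; anything that overflows
 * 64 bits is left to the fallback encoder. bson_int_type is a name local to
 * this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned char bson_int_type(long long v) {
    return (v >= INT32_MIN && v <= INT32_MAX) ? 0x10 : 0x12;
}

int main(void) {
    printf("0x%02X\n", bson_int_type(42));            /* 0x10 */
    printf("0x%02X\n", bson_int_type(3000000000LL));  /* 0x12 */
    return 0;
}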
*/ PyErr_Clear(); - long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow AGAIN */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + } else if (-2147483648LL <= long_long_value && long_long_value <= 2147483647LL) { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; + return buffer_write_int32(buffer, (int32_t)long_long_value); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_int32(buffer, (int32_t)int_value); -#if PY_MAJOR_VERSION < 3 - } else if (PyLong_Check(value)) { - const long long long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_int64(buffer, (int64_t)long_long_value); -#endif } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); - *(buffer_get_buffer(buffer) + type_byte) = 0x01; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; return buffer_write_double(buffer, d); } else if (value == Py_None) { - *(buffer_get_buffer(buffer) + type_byte) = 0x0A; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0A; return 1; } else if (PyDict_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); - } else if (PyList_Check(value) || PyTuple_Check(value)) { + } else if ((is_list = PyList_Check(value)) || PyTuple_Check(value)) { Py_ssize_t items, i; int start_position, length_location, length; char zero = 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = buffer_get_position(buffer); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x04; + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } - - if ((items = PySequence_Size(value)) > BSON_MAX_SIZE) { + if (is_list) { + items = PyList_Size(value); + } else { + items = PyTuple_Size(value); + } + if (items > BSON_MAX_SIZE) { PyObject* BSONError = _error("BSONError"); if (BSONError) { PyErr_SetString(BSONError, @@ -1135,39 +1204,43 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } for(i = 0; i < items; i++) { - int list_type_byte = buffer_save_space(buffer, 1); - char name[16]; + int list_type_byte = pymongo_buffer_save_space(buffer, 1); + char name[BUF_SIZE]; PyObject* item_value; if (list_type_byte == -1) { - PyErr_NoMemory(); return 0; } - INT2STRING(name, (int)i); + int res = LL2STR(name, (long long)i); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { return 0; } - - if (!(item_value = PySequence_GetItem(value, i))) + if (is_list) { + item_value = PyList_GET_ITEM(value, i); + } else { + item_value = PyTuple_GET_ITEM(value, i); + } + if (!item_value) { return 0; + } if (!write_element_to_buffer(self, buffer, list_type_byte, item_value, check_keys, options, 0, 0)) { - Py_DECREF(item_value); return 0; } - Py_DECREF(item_value); } /* write null byte and fill in length */ if 
(!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Store bytes as BSON binary subtype 0. */ } else if (PyBytes_Check(value)) { char subtype = 0; @@ -1177,7 +1250,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; if ((size = _downcast_and_check(PyBytes_GET_SIZE(value), 0)) == -1) return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; if (!buffer_write_int32(buffer, (int32_t)size)) { return 0; } @@ -1188,58 +1261,12 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } return 1; -#else - /* PyString_Check only works in Python 2.x. */ - } else if (PyString_Check(value)) { - result_t status; - const char* data; - int size; - if (!(data = PyString_AS_STRING(value))) - return 0; - if ((size = _downcast_and_check(PyString_GET_SIZE(value), 1)) == -1) - return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x02; - status = check_string((const unsigned char*)data, size - 1, 1, 0); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyObject* repr = PyObject_Repr(value); - char* repr_as_cstr = repr ? PyString_AsString(repr) : NULL; - if (repr_as_cstr) { - PyObject *message = PyString_FromFormat( - "strings in documents must be valid UTF-8: %s", - repr_as_cstr); - - if (message) { - PyErr_SetObject(InvalidStringData, message); - Py_DECREF(message); - } - } else { - /* repr(value) failed, use a generic message. */ - PyErr_SetString( - InvalidStringData, - "strings in documents must be valid UTF-8"); - } - Py_XDECREF(repr); - Py_DECREF(InvalidStringData); - } - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; -#endif } else if (PyUnicode_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x02; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x02; return write_unicode(buffer, value); } else if (PyDateTime_Check(value)) { long long millis; - PyObject* utcoffset = PyObject_CallMethod(value, "utcoffset", NULL); + PyObject* utcoffset = PyObject_CallMethodObjArgs(value, state->_utcoffset_str , NULL); if (utcoffset == NULL) return 0; if (utcoffset != Py_None) { @@ -1253,41 +1280,40 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x09; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); + } else if (PyObject_TypeCheck(value, (PyTypeObject *) state->DatetimeMS)) { + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } /* * Try Mapping and UUID last since we have to import * them if we're in a sub-interpreter. 
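/*
 * Illustrative sketch of the length-prefix framing the document and array
 * writers above follow (pymongo_buffer_save_space then
 * buffer_write_int32_at_position): reserve four bytes, write the elements
 * and the trailing NUL, then patch the little-endian total length back in.
 * The bytes here hand-encode the BSON array {"0": true}.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned char doc[64];
    size_t pos = 4;                       /* reserve space for the length */
    /* one element: type 0x08 (bool), key "0", value true */
    const unsigned char elem[] = {0x08, '0', 0x00, 0x01};
    memcpy(doc + pos, elem, sizeof(elem));
    pos += sizeof(elem);
    doc[pos++] = 0x00;                    /* end-of-object byte */
    uint32_t len = (uint32_t)pos;         /* patch total length, LE order */
    doc[0] = (unsigned char)(len & 0xFF);
    doc[1] = (unsigned char)((len >> 8) & 0xFF);
    doc[2] = (unsigned char)((len >> 16) & 0xFF);
    doc[3] = (unsigned char)((len >> 24) & 0xFF);
    for (size_t i = 0; i < pos; i++) {
        printf("%02x ", doc[i]);          /* 09 00 00 00 08 30 00 01 00 */
    }
    printf("\n");
    return 0;
}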
*/ -#if PY_MAJOR_VERSION >= 3 mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); -#else - mapping_type = _get_object(state->Mapping, "collections", "Mapping"); -#endif if (mapping_type && PyObject_IsInstance(value, mapping_type)) { Py_DECREF(mapping_type); /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); } uuid_type = _get_object(state->UUID, "uuid", "UUID"); if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - /* Just a special case of Binary above, but - * simpler to do as a separate case. */ - PyObject* bytes; - /* Could be bytes, bytearray, str... */ - const char* data; - /* UUID is always 16 bytes */ - int size = 16; - char subtype; + PyObject* binary_type = NULL; + PyObject* binary_value = NULL; + PyObject *uuid_rep_obj = NULL; + int result; Py_DECREF(uuid_type); /* PyObject_IsInstance returns -1 on error */ @@ -1295,58 +1321,30 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - if (options->uuid_rep == JAVA_LEGACY - || options->uuid_rep == CSHARP_LEGACY) { - subtype = 3; - } - else { - subtype = options->uuid_rep; - } - - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { + binary_type = _get_object(state->Binary, "bson", "Binary"); + if (binary_type == NULL) { return 0; } - if (options->uuid_rep == CSHARP_LEGACY) { - /* Legacy C# byte order */ - bytes = PyObject_GetAttrString(value, "bytes_le"); - } - else { - bytes = PyObject_GetAttrString(value, "bytes"); - } - if (!bytes) { + if (!(uuid_rep_obj = PyLong_FromLong(options->uuid_rep))) { return 0; } -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_AsString(bytes); -#else - data = PyString_AsString(bytes); -#endif - if (data == NULL) { - Py_DECREF(bytes); + binary_value = PyObject_CallMethodObjArgs(binary_type, state->_from_uuid_str, value, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); + + if (binary_value == NULL) { + Py_DECREF(binary_type); return 0; } - if (options->uuid_rep == JAVA_LEGACY) { - /* Store in legacy java byte order. 
*/ - char as_legacy_java[16]; - _fix_java(data, as_legacy_java); - if (!buffer_write_bytes(buffer, as_legacy_java, size)) { - Py_DECREF(bytes); - return 0; - } - } - else { - if (!buffer_write_bytes(buffer, data, size)) { - Py_DECREF(bytes); - return 0; - } - } - Py_DECREF(bytes); - return 1; + + result = _write_element_to_buffer(self, buffer, + type_byte, binary_value, + check_keys, options, + in_custom_call, + in_fallback_call); + Py_DECREF(binary_type); + Py_DECREF(binary_value); + return result; } Py_XDECREF(mapping_type); Py_XDECREF(uuid_type); @@ -1401,13 +1399,8 @@ static int check_key_name(const char* name, int name_length) { if (name_length > 0 && name[0] == '$') { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not start with '$'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not start with '$'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1419,13 +1412,8 @@ static int check_key_name(const char* name, int name_length) { if (strchr(name, '.')) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not contain '.'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not contain '.'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1452,9 +1440,8 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt return 1; } - type_byte = buffer_save_space(buffer, 1); + type_byte = pymongo_buffer_save_space(buffer, 1); if (type_byte == -1) { - PyErr_NoMemory(); return 0; } if (check_keys && !check_key_name(name, name_length)) { @@ -1483,7 +1470,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, if (!encoded) { return 0; } -#if PY_MAJOR_VERSION >= 3 if (!(data = PyBytes_AS_STRING(encoded))) { Py_DECREF(encoded); return 0; @@ -1492,16 +1478,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, Py_DECREF(encoded); return 0; } -#else - if (!(data = PyString_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } -#endif if (strlen(data) != (size_t)(size - 1)) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { @@ -1512,56 +1488,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, Py_DECREF(encoded); return 0; } -#if PY_MAJOR_VERSION < 3 - } else if (PyString_Check(key)) { - result_t status; - encoded = key; - Py_INCREF(encoded); - - if (!(data = PyString_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } - status = check_string((const unsigned char*)data, size - 1, 1, 1); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "strings in documents must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded); - return 0; - } else if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - } - Py_DECREF(encoded); - return 0; - } -#endif 
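/*
 * Standalone sketch of the byte shuffle the removed _fix_java helper
 * performed (that logic now lives behind Binary.from_uuid rather than in
 * C): each 8-byte half of a UUID is reversed for the legacy Java
 * representation. fix_java_demo is a name local to this sketch.
 */
#include <stdio.h>

static void fix_java_demo(const char *in, char *out) {
    for (int i = 0, j = 7; i < j; i++, j--) { out[i] = in[j]; out[j] = in[i]; }
    for (int i = 8, j = 15; i < j; i++, j--) { out[i] = in[j]; out[j] = in[i]; }
}

int main(void) {
    const char in[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
    char out[16];
    fix_java_demo(in, out);
    for (int i = 0; i < 16; i++) {
        printf("%d ", out[i]);   /* 7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 */
    }
    printf("\n");
    return 0;
}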
} else { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyObject* repr = PyObject_Repr(key); if (repr) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromString( "documents must have only string keys, key was "); -#else - PyObject* errmsg = PyString_FromString( - "documents must have only string keys, key was "); -#endif if (errmsg) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_Concat(errmsg, repr); if (error) { PyErr_SetObject(InvalidDocument, error); @@ -1569,13 +1503,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, } Py_DECREF(errmsg); Py_DECREF(repr); -#else - PyString_ConcatAndDel(&errmsg, repr); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } -#endif } else { Py_DECREF(repr); } @@ -1600,14 +1527,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw) { +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { char* bytes; Py_ssize_t len; int len_int; int bytes_written = 0; PyObject* bytes_obj = NULL; - bytes_obj = PyObject_GetAttrString(raw, "raw"); + bytes_obj = PyObject_GetAttr(raw, _raw_str); if (!bytes_obj) { goto fail; } @@ -1640,71 +1567,57 @@ int write_dict(PyObject* self, buffer_t buffer, struct module_state *state = GETSTATE(self); PyObject* mapping_type; long type_marker; + int is_dict = PyDict_Check(dict); - /* check for RawBSONDocument */ - type_marker = _type_marker(dict); - if (type_marker < 0) { - return 0; - } + if (!is_dict) { + /* check for RawBSONDocument */ + type_marker = _type_marker(dict, state->_type_marker_str); + if (type_marker < 0) { + return 0; + } - if (101 == type_marker) { - return write_raw_doc(buffer, dict); - } + if (101 == type_marker) { + return write_raw_doc(buffer, dict, state->_raw_str); + } -#if PY_MAJOR_VERSION >= 3 - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); -#else - mapping_type = _get_object(state->Mapping, "collections", "Mapping"); -#endif + mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - if (mapping_type) { - if (!PyObject_IsInstance(dict, mapping_type)) { - PyObject* repr; - Py_DECREF(mapping_type); - if ((repr = PyObject_Repr(dict))) { -#if PY_MAJOR_VERSION >= 3 - PyObject* errmsg = PyUnicode_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(PyExc_TypeError, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); - } -#else - PyObject* errmsg = PyString_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyString_ConcatAndDel(&errmsg, repr); + if (mapping_type) { + if (!PyObject_IsInstance(dict, mapping_type)) { + PyObject* repr; + Py_DECREF(mapping_type); + if ((repr = PyObject_Repr(dict))) { + PyObject* errmsg = PyUnicode_FromString( + "encoder expected a mapping type but got: "); if (errmsg) { - PyErr_SetObject(PyExc_TypeError, errmsg); + PyObject* error = PyUnicode_Concat(errmsg, repr); + if (error) { + PyErr_SetObject(PyExc_TypeError, error); + Py_DECREF(error); + } Py_DECREF(errmsg); + Py_DECREF(repr); } + else { + Py_DECREF(repr); + } + } else { + PyErr_SetString(PyExc_TypeError, + "encoder expected a mapping type"); } -#endif - else { - Py_DECREF(repr); - } - } else { - PyErr_SetString(PyExc_TypeError, - "encoder expected a 
mapping type"); - } - return 0; - } - Py_DECREF(mapping_type); - /* PyObject_IsInstance returns -1 on error */ - if (PyErr_Occurred()) { - return 0; + return 0; + } + Py_DECREF(mapping_type); + /* PyObject_IsInstance returns -1 on error */ + if (PyErr_Occurred()) { + return 0; + } } } - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } @@ -1712,20 +1625,20 @@ int write_dict(PyObject* self, buffer_t buffer, if (top_level) { /* * If "dict" is a defaultdict we don't want to call - * PyMapping_GetItemString on it. That would **create** + * PyObject_GetItem on it. That would **create** * an _id where one didn't previously exist (PYTHON-871). */ - if (PyDict_Check(dict)) { - /* PyDict_GetItemString returns a borrowed reference. */ - PyObject* _id = PyDict_GetItemString(dict, "_id"); + if (is_dict) { + /* PyDict_GetItem returns a borrowed reference. */ + PyObject* _id = PyDict_GetItem(dict, state->_id_str); if (_id) { if (!write_pair(self, buffer, "_id", 3, _id, check_keys, options, 1)) { return 0; } } - } else if (PyMapping_HasKeyString(dict, "_id")) { - PyObject* _id = PyMapping_GetItemString(dict, "_id"); + } else if (PyMapping_HasKey(dict, state->_id_str)) { + PyObject* _id = PyObject_GetItem(dict, state->_id_str); if (!_id) { return 0; } @@ -1734,43 +1647,54 @@ int write_dict(PyObject* self, buffer_t buffer, Py_DECREF(_id); return 0; } - /* PyMapping_GetItemString returns a new reference. */ + /* PyObject_GetItem returns a new reference. */ Py_DECREF(_id); } } - iter = PyObject_GetIter(dict); - if (iter == NULL) { - return 0; - } - while ((key = PyIter_Next(iter)) != NULL) { - PyObject* value = PyObject_GetItem(dict, key); - if (!value) { - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - Py_DECREF(iter); + if (is_dict) { + PyObject* value; + Py_ssize_t pos = 0; + while (PyDict_Next(dict, &pos, &key, &value)) { + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + return 0; + } + } + } else { + iter = PyObject_GetIter(dict); + if (iter == NULL) { return 0; } - if (!decode_and_write_pair(self, buffer, key, value, - check_keys, options, top_level)) { + while ((key = PyIter_Next(iter)) != NULL) { + PyObject* value = PyObject_GetItem(dict, key); + if (!value) { + PyErr_SetObject(PyExc_KeyError, key); + Py_DECREF(key); + Py_DECREF(iter); + return 0; + } + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + Py_DECREF(key); + Py_DECREF(value); + Py_DECREF(iter); + return 0; + } Py_DECREF(key); Py_DECREF(value); - Py_DECREF(iter); + } + Py_DECREF(iter); + if (PyErr_Occurred()) { return 0; } - Py_DECREF(key); - Py_DECREF(value); - } - Py_DECREF(iter); - if (PyErr_Occurred()) { - return 0; } /* write null byte and fill in length */ if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - length_location; + length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return length; @@ -1781,56 +1705,122 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* result; unsigned char check_keys; unsigned char top_level = 1; + PyObject* options_obj; codec_options_t options; buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; + struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "ObO&|b", &dict, &check_keys, - 
convert_codec_options, &options, &top_level)) { + if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, + &options_obj, &top_level) && + convert_codec_options(self, options_obj, &options))) { return NULL; } /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; } else if (101 == type_marker) { destroy_codec_options(&options); - raw_bson_document_bytes_obj = PyObject_GetAttrString(dict, "raw"); + raw_bson_document_bytes_obj = PyObject_GetAttr(dict, state->_raw_str); if (NULL == raw_bson_document_bytes_obj) { return NULL; } return raw_bson_document_bytes_obj; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { destroy_codec_options(&options); - PyErr_NoMemory(); return NULL; } if (!write_dict(self, buffer, dict, check_keys, &options, top_level)) { destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return NULL; } /* objectify buffer */ - result = Py_BuildValue(BYTES_FORMAT_STRING, buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return result; } +/* + * Hook for optional decoding BSON documents to DBRef. + */ +static PyObject *_dbref_hook(PyObject* self, PyObject* value) { + struct module_state *state = GETSTATE(self); + PyObject* dbref = NULL; + PyObject* dbref_type = NULL; + PyObject* ref = NULL; + PyObject* id = NULL; + PyObject* database = NULL; + PyObject* ret = NULL; + int db_present = 0; + + /* Decoding for DBRefs */ + if (PyMapping_HasKey(value, state->_dollar_ref_str) && PyMapping_HasKey(value, state->_dollar_id_str)) { /* DBRef */ + ref = PyObject_GetItem(value, state->_dollar_ref_str); + /* PyObject_GetItem returns NULL to indicate error. */ + if (!ref) { + goto invalid; + } + id = PyObject_GetItem(value, state->_dollar_id_str); + /* PyObject_GetItem returns NULL to indicate error. 
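/*
 * Hypothetical embedded-Python sketch (links against libpython) of the
 * shape test _dbref_hook applies: a decoded document is promoted to a DBRef
 * only when both "$ref" and "$id" are present and "$ref" is a string, with
 * "$db" optional. The real hook handles arbitrary mappings via
 * PyMapping_HasKey; this sketch simplifies to a plain dict.
 */
#include <Python.h>
#include <stdio.h>

static int looks_like_dbref(PyObject *doc) {
    PyObject *ref = PyDict_GetItemString(doc, "$ref");  /* borrowed ref */
    return ref != NULL && PyUnicode_Check(ref) &&
           PyDict_GetItemString(doc, "$id") != NULL;
}

int main(void) {
    Py_Initialize();
    PyObject *doc = Py_BuildValue("{s:s, s:i}", "$ref", "coll", "$id", 7);
    if (doc) {
        printf("%d\n", looks_like_dbref(doc));  /* 1 */
    }
    Py_XDECREF(doc);
    return Py_FinalizeEx();
}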
*/ + if (!id) { + goto invalid; + } + + if (PyMapping_HasKey(value, state->_dollar_db_str)) { + database = PyObject_GetItem(value, state->_dollar_db_str); + if (!database) { + goto invalid; + } + db_present = 1; + } else { + database = Py_None; + Py_INCREF(database); + } + + // check types + if (!(PyUnicode_Check(ref) && (database == Py_None || PyUnicode_Check(database)))) { + ret = value; + goto invalid; + } + + PyMapping_DelItem(value, state->_dollar_ref_str); + PyMapping_DelItem(value, state->_dollar_id_str); + if (db_present) { + PyMapping_DelItem(value, state->_dollar_db_str); + } + + if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { + dbref = PyObject_CallFunctionObjArgs(dbref_type, ref, id, database, value, NULL); + Py_DECREF(value); + ret = dbref; + } + } else { + ret = value; + } +invalid: + Py_XDECREF(dbref_type); + Py_XDECREF(ref); + Py_XDECREF(id); + Py_XDECREF(database); + return ret; +} + static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, - unsigned max, const codec_options_t* options) { + unsigned max, const codec_options_t* options, int raw_array) { struct module_state *state = GETSTATE(self); - PyObject* value = NULL; switch (type) { case 1: @@ -1873,7 +1863,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 3: { - PyObject* collection; uint32_t size; if (max < 4) { @@ -1891,7 +1880,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (options->is_raw_bson) { value = PyObject_CallFunction( - options->document_class, BYTES_FORMAT_STRING "O", + options->document_class, "y#O", buffer + *position, (Py_ssize_t)size, options->options_obj); if (!value) { goto invalid; @@ -1906,55 +1895,10 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } - /* Decoding for DBRefs */ - if (PyMapping_HasKeyString(value, "$ref")) { /* DBRef */ - PyObject* dbref = NULL; - PyObject* dbref_type; - PyObject* id; - PyObject* database; - - collection = PyMapping_GetItemString(value, "$ref"); - /* PyMapping_GetItemString returns NULL to indicate error. */ - if (!collection) { - goto invalid; - } - PyMapping_DelItemString(value, "$ref"); - - if (PyMapping_HasKeyString(value, "$id")) { - id = PyMapping_GetItemString(value, "$id"); - if (!id) { - Py_DECREF(collection); - goto invalid; - } - PyMapping_DelItemString(value, "$id"); - } else { - id = Py_None; - Py_INCREF(id); - } - - if (PyMapping_HasKeyString(value, "$db")) { - database = PyMapping_GetItemString(value, "$db"); - if (!database) { - Py_DECREF(collection); - Py_DECREF(id); - goto invalid; - } - PyMapping_DelItemString(value, "$db"); - } else { - database = Py_None; - Py_INCREF(database); - } - - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = PyObject_CallFunctionObjArgs(dbref_type, collection, id, database, value, NULL); - Py_DECREF(dbref_type); - } - Py_DECREF(value); - value = dbref; - - Py_DECREF(id); - Py_DECREF(collection); - Py_DECREF(database); + /* Hook for DBRefs */ + value = _dbref_hook(self, value); + if (!value) { + goto invalid; } *position += size; @@ -1972,11 +1916,20 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (size < BSON_MIN_SIZE || max < size) { goto invalid; } + end = *position + size - 1; /* Check for bad eoo */ if (buffer[end]) { goto invalid; } + + if (raw_array != 0) { + // Treat it as a binary buffer. 
+ value = PyBytes_FromStringAndSize(buffer + *position, size); + *position += size; + break; + } + *position += 4; value = PyList_New(0); @@ -2000,7 +1953,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } to_append = get_value(self, name, buffer, position, bson_type, - max - (unsigned)key_size, options); + max - (unsigned)key_size, options, raw_array); Py_LeaveRecursiveCall(); if (!to_append) { Py_DECREF(value); @@ -2048,7 +2001,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } } -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Decode BSON binary subtype 0 to bytes. */ if (subtype == 0) { value = PyBytes_FromStringAndSize(buffer + *position, length); @@ -2060,87 +2012,56 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } else { data = PyBytes_FromStringAndSize(buffer + *position, length); } -#else - if (subtype == 2) { - data = PyString_FromStringAndSize(buffer + *position + 4, length - 4); - } else { - data = PyString_FromStringAndSize(buffer + *position, length); - } -#endif if (!data) { goto invalid; } - /* Encode as UUID, not Binary */ + /* Encode as UUID or Binary based on options->uuid_rep */ if (subtype == 3 || subtype == 4) { - PyObject* kwargs; - PyObject* args = PyTuple_New(0); + PyObject* binary_type = NULL; + PyObject* binary_value = NULL; + char uuid_rep = options->uuid_rep; + /* UUID should always be 16 bytes */ - if (!args || length != 16) { - Py_DECREF(data); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(data); - Py_DECREF(args); - goto invalid; + if (length != 16) { + goto uuiderror; } - /* - * From this point, we hold refs to args, kwargs, and data. - * If anything fails, goto uuiderror to clean them up. 
- */ - if (subtype == 3 && options->uuid_rep == CSHARP_LEGACY) { - /* Legacy C# byte order */ - if ((PyDict_SetItemString(kwargs, "bytes_le", data)) == -1) - goto uuiderror; + binary_type = _get_object(state->Binary, "bson", "Binary"); + if (binary_type == NULL) { + goto uuiderror; } - else { - if (subtype == 3 && options->uuid_rep == JAVA_LEGACY) { - /* Convert from legacy java byte order */ - char big_endian[16]; - _fix_java(buffer + *position, big_endian); - /* Free the previously created PyString object */ - Py_DECREF(data); -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_FromStringAndSize(big_endian, length); -#else - data = PyString_FromStringAndSize(big_endian, length); -#endif - if (data == NULL) - goto uuiderror; - } - if ((PyDict_SetItemString(kwargs, "bytes", data)) == -1) - goto uuiderror; + binary_value = PyObject_CallFunction(binary_type, "(Oi)", data, subtype); + if (binary_value == NULL) { + goto uuiderror; } - if ((type_to_create = _get_object(state->UUID, "uuid", "UUID"))) { - value = PyObject_Call(type_to_create, args, kwargs); - Py_DECREF(type_to_create); + + if ((uuid_rep == UNSPECIFIED) || + (subtype == 4 && uuid_rep != STANDARD) || + (subtype == 3 && uuid_rep == STANDARD)) { + value = binary_value; + Py_INCREF(value); + } else { + PyObject *uuid_rep_obj = PyLong_FromLong(uuid_rep); + if (!uuid_rep_obj) { + goto uuiderror; + } + value = PyObject_CallMethodObjArgs(binary_value, state->_as_uuid_str, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); } - Py_DECREF(args); - Py_DECREF(kwargs); + uuiderror: + Py_XDECREF(binary_type); + Py_XDECREF(binary_value); Py_DECREF(data); if (!value) { goto invalid; } - *position += length; break; - - uuiderror: - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(data); - goto invalid; } -#if PY_MAJOR_VERSION >= 3 st = PyLong_FromLong(subtype); -#else - st = PyInt_FromLong(subtype); -#endif if (!st) { Py_DECREF(data); goto invalid; @@ -2171,7 +2092,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - value = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + value = PyObject_CallFunction(objectid_type, "y#", buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } @@ -2210,8 +2131,79 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } memcpy(&millis, buffer + *position, 8); millis = (int64_t)BSON_UINT64_FROM_LE(millis); - naive = datetime_from_millis(millis); *position += 8; + + if (options->datetime_conversion == DATETIME_MS){ + value = datetime_ms_from_millis(self, millis); + break; + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + + if (dt_clamp || dt_auto){ + PyObject *min_millis_fn = _get_object(state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms"); + PyObject *max_millis_fn = _get_object(state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms"); + PyObject *min_millis_fn_res; + PyObject *max_millis_fn_res; + int64_t min_millis; + int64_t max_millis; + + if (min_millis_fn == NULL || max_millis_fn == NULL) { + Py_XDECREF(min_millis_fn); + Py_XDECREF(max_millis_fn); + goto invalid; + } + + if (options->tz_aware){ + PyObject* tzinfo = options->tzinfo; + if (tzinfo == Py_None) { + // Default to UTC. 
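/*
 * Pure-C sketch of the DATETIME_CLAMP / DATETIME_AUTO decision made here:
 * clamping pins out-of-range milliseconds to the representable datetime
 * bounds, while auto keeps the raw value as a DatetimeMS instead. The
 * bounds below are placeholders; the real limits come from
 * bson.datetime_ms._min_datetime_ms / _max_datetime_ms.
 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const int64_t min_millis = -62135596800000LL;   /* placeholder bounds */
    const int64_t max_millis = 253402300799999LL;
    int64_t millis = max_millis + 5;

    /* DATETIME_CLAMP: pin to the nearest bound. */
    int64_t clamped = millis < min_millis ? min_millis
                    : millis > max_millis ? max_millis : millis;
    /* DATETIME_AUTO: out-of-range values stay as raw millis (DatetimeMS). */
    int out_of_range = millis < min_millis || millis > max_millis;

    printf("clamped=%lld auto_uses_DatetimeMS=%d\n",
           (long long)clamped, out_of_range);
    return 0;
}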
+ utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); + tzinfo = utc_type; + } + min_millis_fn_res = PyObject_CallFunctionObjArgs(min_millis_fn, tzinfo, NULL); + max_millis_fn_res = PyObject_CallFunctionObjArgs(max_millis_fn, tzinfo, NULL); + } else { + min_millis_fn_res = PyObject_CallObject(min_millis_fn, NULL); + max_millis_fn_res = PyObject_CallObject(max_millis_fn, NULL); + } + + Py_DECREF(min_millis_fn); + Py_DECREF(max_millis_fn); + + if (!min_millis_fn_res || !max_millis_fn_res){ + Py_XDECREF(min_millis_fn_res); + Py_XDECREF(max_millis_fn_res); + goto invalid; + } + + min_millis = PyLong_AsLongLong(min_millis_fn_res); + max_millis = PyLong_AsLongLong(max_millis_fn_res); + + if ((min_millis == -1 || max_millis == -1) && PyErr_Occurred()) + { + // min/max_millis check + goto invalid; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else { // dt_auto + if (millis < min_millis || millis > max_millis){ + value = datetime_ms_from_millis(self, millis); + break; // Out-of-range so done. + } + } + } + + naive = datetime_from_millis(millis); if (!options->tz_aware) { /* In the naive case, we're done here. */ value = naive; break; @@ -2220,7 +2212,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (!naive) { goto invalid; } - replace = PyObject_GetAttrString(naive, "replace"); + replace = PyObject_GetAttr(naive, state->_replace_str); Py_DECREF(naive); if (!replace) { goto invalid; @@ -2237,7 +2229,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - if (!utc_type || PyDict_SetItemString(kwargs, "tzinfo", utc_type) == -1) { + if (!utc_type || PyDict_SetItem(kwargs, state->_tzinfo_str, utc_type) == -1) { Py_DECREF(replace); Py_DECREF(args); Py_DECREF(kwargs); @@ -2255,7 +2247,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, /* convert to local time */ if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttrString(value, "astimezone"); + astimezone = PyObject_GetAttr(value, state->_astimezone_str); Py_DECREF(value); if (!astimezone) { Py_DECREF(replace); @@ -2358,7 +2350,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, *position += coll_length; if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - id = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + id = PyObject_CallFunction(objectid_type, "y#", buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } @@ -2413,6 +2405,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, uint32_t c_w_s_size; uint32_t code_size; uint32_t scope_size; + uint32_t len; PyObject* code; PyObject* scope; PyObject* code_type; @@ -2432,7 +2425,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&code_size, buffer + *position, 4); code_size = BSON_UINT32_FROM_LE(code_size); /* code_w_scope length + code length + code + scope length */ - if (!code_size || max < code_size || max < 4 + 4 + code_size + 4) { + len = 4 + 4 + code_size + 4; + if (!code_size || max < code_size || max < len || len < code_size) { goto invalid; } *position += 4; @@ -2450,12 +2444,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&scope_size, buffer + *position, 4); scope_size = 
BSON_UINT32_FROM_LE(scope_size); - if (scope_size < BSON_MIN_SIZE) { - Py_DECREF(code); - goto invalid; - } /* code length + code + scope length + scope */ - if ((4 + code_size + 4 + scope_size) != c_w_s_size) { + len = 4 + 4 + code_size + scope_size; + if (scope_size < BSON_MIN_SIZE || len != c_w_s_size || len < scope_size) { Py_DECREF(code); goto invalid; } @@ -2488,11 +2479,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } memcpy(&i, buffer + *position, 4); i = (int32_t)BSON_UINT32_FROM_LE(i); -#if PY_MAJOR_VERSION >= 3 value = PyLong_FromLong(i); -#else - value = PyInt_FromLong(i); -#endif if (!value) { goto invalid; } @@ -2544,12 +2531,14 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if ((dec128 = _get_object(state->Decimal128, "bson.decimal128", "Decimal128"))) { - value = PyObject_CallMethod(dec128, - "from_bid", - BYTES_FORMAT_STRING, - buffer + *position, - (Py_ssize_t)16); + PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); + if (!_bytes_obj) { + Py_DECREF(dec128); + goto invalid; + } + value = PyObject_CallMethodObjArgs(dec128, state->_from_bid_str, _bytes_obj, NULL); Py_DECREF(dec128); + Py_DECREF(_bytes_obj); } *position += 16; break; @@ -2694,13 +2683,14 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, static int _element_to_dict(PyObject* self, const char* string, unsigned position, unsigned max, const codec_options_t* options, + int raw_array, PyObject** name, PyObject** value) { unsigned char type = (unsigned char)string[position++]; size_t name_length = strlen(string + position); if (name_length > BSON_MAX_SIZE || position + name_length >= max) { PyObject* InvalidBSON = _error("InvalidBSON"); if (InvalidBSON) { - PyErr_SetNone(InvalidBSON); + PyErr_SetString(InvalidBSON, "field name too large"); Py_DECREF(InvalidBSON); } return -1; @@ -2734,7 +2724,7 @@ static int _element_to_dict(PyObject* self, const char* string, } position += (unsigned)name_length + 1; *value = get_value(self, *name, string, &position, type, - max - position, options); + max - position, options, raw_array); if (!*value) { Py_DECREF(*name); return -1; @@ -2746,41 +2736,29 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { /* TODO: Support buffer protocol */ char* string; PyObject* bson; + PyObject* options_obj; codec_options_t options; unsigned position; unsigned max; int new_position; + int raw_array = 0; PyObject* name; PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OII|O&", &bson, &position, &max, - convert_codec_options, &options)) { + if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, + &options_obj, &raw_array) && + convert_codec_options(self, options_obj, &options))) { return NULL; } - if (PyTuple_GET_SIZE(args) < 4) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } -#if PY_MAJOR_VERSION >= 3 if (!PyBytes_Check(bson)) { PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); -#else - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a string"); -#endif return NULL; } -#if PY_MAJOR_VERSION >= 3 string = PyBytes_AS_STRING(bson); -#else - string = PyString_AS_STRING(bson); -#endif - new_position = _element_to_dict(self, string, position, max, &options, - &name, &value); + new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); if 
(new_position < 0) { return NULL; } @@ -2792,6 +2770,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { return NULL; } + destroy_codec_options(&options); return result_tuple; } @@ -2803,13 +2782,14 @@ static PyObject* _elements_to_dict(PyObject* self, const char* string, if (!dict) { return NULL; } + int raw_array = 0; while (position < max) { PyObject* name = NULL; PyObject* value = NULL; int new_position; new_position = _element_to_dict( - self, string, position, max, options, &name, &value); + self, string, position, max, options, raw_array, &name, &value); if (new_position < 0) { Py_DECREF(dict); return NULL; @@ -2867,10 +2847,10 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { codec_options_t options; PyObject* result = NULL; PyObject* options_obj; - Py_buffer view; + Py_buffer view = {0}; if (! (PyArg_ParseTuple(args, "OO", &bson, &options_obj) && - convert_codec_options(options_obj, &options))) { + convert_codec_options(self, options_obj, &options))) { return result; } @@ -2892,7 +2872,6 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { } string = (char*)view.buf; - memcpy(&size, string, 4); size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { @@ -2925,7 +2904,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument */ if (options.is_raw_bson) { result = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, + options.document_class, "y#O", string, (Py_ssize_t)size, options_obj); } else { @@ -2945,17 +2924,11 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* dict; PyObject* result = NULL; codec_options_t options; - PyObject* options_obj; - Py_buffer view; + PyObject* options_obj = NULL; + Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { - return NULL; - } - if (PyTuple_GET_SIZE(args) < 2) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } else if (!convert_codec_options(options_obj, &options)) { + if (!(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -3017,7 +2990,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument. 
*/ if (options.is_raw_bson) { dict = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, + options.document_class, "y#O", string, (Py_ssize_t)size, options_obj); } else { dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); @@ -3044,19 +3017,138 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { return result; } + +static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { + uint32_t size; + uint32_t value_length; + uint32_t position = 0; + buffer_t buffer; + const char* string; + PyObject* arr; + PyObject* result = NULL; + Py_buffer view = {0}; + + if (!PyArg_ParseTuple(args, "O", &arr)) { + return NULL; + } + + if (!_get_buffer(arr, &view)) { + return NULL; + } + + buffer = pymongo_buffer_new(); + if (!buffer) { + PyBuffer_Release(&view); + return NULL; + } + + string = (char*)view.buf; + + if (view.len < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + } + goto done; + } + + memcpy(&size, string, 4); + size = BSON_UINT32_FROM_LE(size); + /* save space for length */ + if (pymongo_buffer_save_space(buffer, size) == -1) { + goto fail; + } + pymongo_buffer_update_position(buffer, 0); + + position += 4; + while (position < size - 1) { + // Verify the value is an object. + unsigned char type = (unsigned char)string[position]; + if (type != 3) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "array element was not an object"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + // Just skip the keys. + position = position + strlen(string + position) + 1; + + if (position >= size || (size - position) < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid array content"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&value_length, string + position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); + if (value_length < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid message size"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (view.len < size) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { + goto fail; + } + position += value_length; + } + + /* objectify buffer */ + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + pymongo_buffer_free(buffer); + return result; +} + + static PyMethodDef _CBSONMethods[] = { {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, "convert a dictionary to a string containing its BSON representation."}, {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, "convert a BSON string to a SON object."}, - {"decode_all", _cbson_decode_all, METH_VARARGS, + {"_decode_all", _cbson_decode_all, METH_VARARGS, "convert binary data to a sequence of documents."}, {"_element_to_dict", 
_cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, + {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, + {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 #define INITERROR return NULL static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->Binary); @@ -3070,6 +3162,31 @@ static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->MaxKey); Py_VISIT(GETSTATE(m)->UTC); Py_VISIT(GETSTATE(m)->REType); + Py_VISIT(GETSTATE(m)->_type_marker_str); + Py_VISIT(GETSTATE(m)->_flags_str); + Py_VISIT(GETSTATE(m)->_pattern_str); + Py_VISIT(GETSTATE(m)->_encoder_map_str); + Py_VISIT(GETSTATE(m)->_decoder_map_str); + Py_VISIT(GETSTATE(m)->_fallback_encoder_str); + Py_VISIT(GETSTATE(m)->_raw_str); + Py_VISIT(GETSTATE(m)->_subtype_str); + Py_VISIT(GETSTATE(m)->_binary_str); + Py_VISIT(GETSTATE(m)->_scope_str); + Py_VISIT(GETSTATE(m)->_inc_str); + Py_VISIT(GETSTATE(m)->_time_str); + Py_VISIT(GETSTATE(m)->_bid_str); + Py_VISIT(GETSTATE(m)->_replace_str); + Py_VISIT(GETSTATE(m)->_astimezone_str); + Py_VISIT(GETSTATE(m)->_id_str); + Py_VISIT(GETSTATE(m)->_dollar_ref_str); + Py_VISIT(GETSTATE(m)->_dollar_id_str); + Py_VISIT(GETSTATE(m)->_dollar_db_str); + Py_VISIT(GETSTATE(m)->_tzinfo_str); + Py_VISIT(GETSTATE(m)->_as_doc_str); + Py_VISIT(GETSTATE(m)->_utcoffset_str); + Py_VISIT(GETSTATE(m)->_from_uuid_str); + Py_VISIT(GETSTATE(m)->_as_uuid_str); + Py_VISIT(GETSTATE(m)->_from_bid_str); return 0; } @@ -3085,6 +3202,31 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->MaxKey); Py_CLEAR(GETSTATE(m)->UTC); Py_CLEAR(GETSTATE(m)->REType); + Py_CLEAR(GETSTATE(m)->_type_marker_str); + Py_CLEAR(GETSTATE(m)->_flags_str); + Py_CLEAR(GETSTATE(m)->_pattern_str); + Py_CLEAR(GETSTATE(m)->_encoder_map_str); + Py_CLEAR(GETSTATE(m)->_decoder_map_str); + Py_CLEAR(GETSTATE(m)->_fallback_encoder_str); + Py_CLEAR(GETSTATE(m)->_raw_str); + Py_CLEAR(GETSTATE(m)->_subtype_str); + Py_CLEAR(GETSTATE(m)->_binary_str); + Py_CLEAR(GETSTATE(m)->_scope_str); + Py_CLEAR(GETSTATE(m)->_inc_str); + Py_CLEAR(GETSTATE(m)->_time_str); + Py_CLEAR(GETSTATE(m)->_bid_str); + Py_CLEAR(GETSTATE(m)->_replace_str); + Py_CLEAR(GETSTATE(m)->_astimezone_str); + Py_CLEAR(GETSTATE(m)->_id_str); + Py_CLEAR(GETSTATE(m)->_dollar_ref_str); + Py_CLEAR(GETSTATE(m)->_dollar_id_str); + Py_CLEAR(GETSTATE(m)->_dollar_db_str); + Py_CLEAR(GETSTATE(m)->_tzinfo_str); + Py_CLEAR(GETSTATE(m)->_as_doc_str); + Py_CLEAR(GETSTATE(m)->_utcoffset_str); + Py_CLEAR(GETSTATE(m)->_from_uuid_str); + Py_CLEAR(GETSTATE(m)->_as_uuid_str); + Py_CLEAR(GETSTATE(m)->_from_bid_str); return 0; } @@ -3102,11 +3244,6 @@ static struct PyModuleDef moduledef = { PyMODINIT_FUNC PyInit__cbson(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cbson(void) -#endif { PyObject *m; PyObject *c_api_object; @@ -3131,20 +3268,11 @@ init_cbson(void) (void *) buffer_write_int32_at_position; _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; -#if PY_VERSION_HEX >= 0x03010000 - /* PyCapsule is new in python 3.1 */ c_api_object = PyCapsule_New((void *) _cbson_API, "_cbson._C_API", NULL); -#else - c_api_object = PyCObject_FromVoidPtr((void *) _cbson_API, NULL); -#endif if (c_api_object == NULL) INITERROR; -#if PY_MAJOR_VERSION >= 3 m = 
PyModule_Create(&moduledef); -#else - m = Py_InitModule("_cbson", _CBSONMethods); -#endif if (m == NULL) { Py_DECREF(c_api_object); INITERROR; @@ -3153,21 +3281,15 @@ init_cbson(void) /* Import several python objects */ if (_load_python_objects(m)) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } -#if PY_MAJOR_VERSION >= 3 return m; -#endif } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 69590d5647..3be2b74427 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -23,33 +23,34 @@ /* * This macro is basically an implementation of asprintf for win32 * We print to the provided buffer to get the string value as an int. + * USE LL2STR. This is kept only to test LL2STR. */ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #define INT2STRING(buffer, i) \ _snprintf_s((buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) #else #define INT2STRING(buffer, i) \ _snprintf((buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif #else -#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%d", (i)) +#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%lld", (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif -#if PY_MAJOR_VERSION >= 3 -#define BYTES_FORMAT_STRING "y#" -#else -#define BYTES_FORMAT_STRING "s#" -#endif +/* Just enough space in char array to hold LLONG_MIN and null terminator */ +#define BUF_SIZE 21 +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long int num, char* str, size_t size); +#define LL2STR(buffer, i) cbson_long_long_to_str((i), (buffer), sizeof(buffer)) typedef struct type_registry_t { PyObject* encoder_map; @@ -68,6 +69,7 @@ typedef struct codec_options_t { char* unicode_decode_error_handler; PyObject* tzinfo; type_registry_t type_registry; + unsigned char datetime_conversion; PyObject* options_obj; unsigned char is_raw_bson; } codec_options_t; @@ -91,7 +93,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, codec_options_t* options) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void diff --git a/bson/_helpers.py b/bson/_helpers.py new file mode 100644 index 0000000000..5a479867c2 --- /dev/null +++ b/bson/_helpers.py @@ -0,0 +1,43 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
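
The two helpers added below in bson/_helpers.py let classes that use __slots__ participate in the default pickle protocol. A minimal usage sketch, assuming a hypothetical Point class that is not part of this patch:

import pickle

from bson._helpers import _getstate_slots, _setstate_slots


class Point:
    # Hypothetical class, used only to illustrate the helpers.
    __slots__ = ("x", "y")
    __getstate__ = _getstate_slots
    __setstate__ = _setstate_slots

    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y


# Default pickling now round-trips the slot values.
p = pickle.loads(pickle.dumps(Point(1, 2)))
assert (p.x, p.y) == (1, 2)

The _mangle_name helper exists because slots whose names start with two underscores are stored under their class-mangled attribute names (a slot "__foo" on Point lives at "_Point__foo"), so _getstate_slots must look them up under the mangled name.
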
+ +"""Setstate and getstate functions for objects with __slots__, allowing +compatibility with default pickling protocol +""" +from __future__ import annotations + +from typing import Any, Mapping + + +def _setstate_slots(self: Any, state: Any) -> None: + for slot, value in state.items(): + setattr(self, slot, value) + + +def _mangle_name(name: str, prefix: str) -> str: + if name.startswith("__"): + prefix = "_" + prefix + else: + prefix = "" + return prefix + name + + +def _getstate_slots(self: Any) -> Mapping[Any, Any]: + prefix = self.__class__.__name__ + ret = {} + for name in self.__slots__: + mangled_name = _mangle_name(name, prefix) + if hasattr(self, mangled_name): + ret[mangled_name] = getattr(self, mangled_name) + return ret diff --git a/bson/binary.py b/bson/binary.py index 1c833b5a56..a4cd44e930 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Tuple, Type, Union from uuid import UUID -from bson.py3compat import PY3 - """Tools for representing BSON binary data. """ @@ -40,7 +40,10 @@ """Old BSON binary subtype for a UUID. :class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using this subtype. +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.PYTHON_LEGACY`, +:data:`UuidRepresentation.JAVA_LEGACY`, or +:data:`UuidRepresentation.CSHARP_LEGACY`. .. versionadded:: 2.1 """ @@ -48,69 +51,146 @@ UUID_SUBTYPE = 4 """BSON binary subtype for a UUID. -This is the new BSON binary subtype for UUIDs. The -current default is :data:`OLD_UUID_SUBTYPE`. - -.. versionchanged:: 2.1 - Changed to subtype 4. +This is the standard BSON binary subtype for UUIDs. +:class:`uuid.UUID` instances will automatically be encoded +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.STANDARD`. """ -STANDARD = UUID_SUBTYPE -"""The standard UUID representation. -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary, using RFC-4122 byte order with -binary subtype :data:`UUID_SUBTYPE`. +if TYPE_CHECKING: + from array import array as _array + from mmap import mmap as _mmap -.. versionadded:: 3.0 -""" -PYTHON_LEGACY = OLD_UUID_SUBTYPE -"""The Python legacy UUID representation. +class UuidRepresentation: + UNSPECIFIED = 0 + """An unspecified UUID representation. + + When configured, :class:`uuid.UUID` instances will **not** be + automatically encoded to or decoded from :class:`~bson.binary.Binary`. + When encoding a :class:`uuid.UUID` instance, an error will be raised. + To encode a :class:`uuid.UUID` instance with this configuration, it must + be wrapped in the :class:`~bson.binary.Binary` class by the application + code. When decoding a BSON binary field with a UUID subtype, a + :class:`~bson.binary.Binary` instance will be returned instead of a + :class:`uuid.UUID` instance. + + See :ref:`unspecified-representation-details` for details. + + .. versionadded:: 3.11 + """ + + STANDARD = UUID_SUBTYPE + """The standard UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`UUID_SUBTYPE`. + + See :ref:`standard-representation-details` for details. + + .. 
versionadded:: 3.11 + """ + + PYTHON_LEGACY = OLD_UUID_SUBTYPE + """The Python legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`OLD_UUID_SUBTYPE`. + + See :ref:`python-legacy-representation-details` for details. + + .. versionadded:: 3.11 + """ + + JAVA_LEGACY = 5 + """The Java legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the Java driver's legacy byte order. + + See :ref:`java-legacy-representation-details` for details. + + .. versionadded:: 3.11 + """ + + CSHARP_LEGACY = 6 + """The C#/.net legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the C# driver's legacy byte order. + + See :ref:`csharp-legacy-representation-details` for details. + + .. versionadded:: 3.11 + """ + -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary, using RFC-4122 byte order with -binary subtype :data:`OLD_UUID_SUBTYPE`. +STANDARD = UuidRepresentation.STANDARD +"""An alias for :data:`UuidRepresentation.STANDARD`. .. versionadded:: 3.0 """ -JAVA_LEGACY = 5 -"""The Java legacy UUID representation. +PYTHON_LEGACY = UuidRepresentation.PYTHON_LEGACY +"""An alias for :data:`UuidRepresentation.PYTHON_LEGACY`. -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, -using the Java driver's legacy byte order. +.. versionadded:: 3.0 +""" + +JAVA_LEGACY = UuidRepresentation.JAVA_LEGACY +"""An alias for :data:`UuidRepresentation.JAVA_LEGACY`. .. versionchanged:: 3.6 - BSON binary subtype 4 is decoded using RFC-4122 byte order. + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. versionadded:: 2.3 """ -CSHARP_LEGACY = 6 -"""The C#/.net legacy UUID representation. - -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, -using the C# driver's legacy byte order. +CSHARP_LEGACY = UuidRepresentation.CSHARP_LEGACY +"""An alias for :data:`UuidRepresentation.CSHARP_LEGACY`. .. versionchanged:: 3.6 - BSON binary subtype 4 is decoded using RFC-4122 byte order. + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. versionadded:: 2.3 """ ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE) -ALL_UUID_REPRESENTATIONS = (STANDARD, PYTHON_LEGACY, JAVA_LEGACY, CSHARP_LEGACY) +ALL_UUID_REPRESENTATIONS = ( + UuidRepresentation.UNSPECIFIED, + UuidRepresentation.STANDARD, + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, +) UUID_REPRESENTATION_NAMES = { - PYTHON_LEGACY: 'PYTHON_LEGACY', - STANDARD: 'STANDARD', - JAVA_LEGACY: 'JAVA_LEGACY', - CSHARP_LEGACY: 'CSHARP_LEGACY'} + UuidRepresentation.UNSPECIFIED: "UuidRepresentation.UNSPECIFIED", + UuidRepresentation.STANDARD: "UuidRepresentation.STANDARD", + UuidRepresentation.PYTHON_LEGACY: "UuidRepresentation.PYTHON_LEGACY", + UuidRepresentation.JAVA_LEGACY: "UuidRepresentation.JAVA_LEGACY", + UuidRepresentation.CSHARP_LEGACY: "UuidRepresentation.CSHARP_LEGACY", +} MD5_SUBTYPE = 5 """BSON binary subtype for an MD5 hash. """ +COLUMN_SUBTYPE = 7 +"""BSON binary subtype for columns. + +.. versionadded:: 4.0 +""" + +SENSITIVE_SUBTYPE = 8 +"""BSON binary subtype for sensitive data. 
+ +.. versionadded:: 4.5 +""" + + USER_DEFINED_SUBTYPE = 128 """BSON binary subtype for any user defined structure. """ @@ -125,18 +205,17 @@ class Binary(bytes): what should be considered a string when we encode to BSON. Raises TypeError if `data` is not an instance of :class:`bytes` - (:class:`str` in python 2) or `subtype` is not an instance of - :class:`int`. Raises ValueError if `subtype` is not in [0, 256). + or `subtype` is not an instance of :class:`int`. + Raises ValueError if `subtype` is not in [0, 256). .. note:: - In python 3 instances of Binary with subtype 0 will be decoded - directly to :class:`bytes`. + Instances of Binary with subtype 0 will be decoded directly to :class:`bytes`. :Parameters: - `data`: the binary data to represent. Can be any bytes-like type that implements the buffer protocol. - `subtype` (optional): the `binary subtype - `_ + `_ to use .. versionchanged:: 3.9 @@ -144,8 +223,13 @@ class Binary(bytes): """ _type_marker = 5 + __subtype: int - def __new__(cls, data, subtype=BINARY_SUBTYPE): + def __new__( + cls: Type[Binary], + data: Union[memoryview, bytes, _mmap, _array[Any]], + subtype: int = BINARY_SUBTYPE, + ) -> Binary: if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: @@ -155,91 +239,132 @@ def __new__(cls, data, subtype=BINARY_SUBTYPE): self.__subtype = subtype return self - @property - def subtype(self): - """Subtype of this binary data. + @classmethod + def from_uuid( + cls: Type[Binary], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD + ) -> Binary: + """Create a BSON Binary object from a Python UUID. + + Creates a :class:`~bson.binary.Binary` object from a + :class:`uuid.UUID` instance. Assumes that the native + :class:`uuid.UUID` instance uses the byte-order implied by the + provided ``uuid_representation``. + + Raises :exc:`TypeError` if `uuid` is not an instance of + :class:`~uuid.UUID`. + + :Parameters: + - `uuid`: A :class:`uuid.UUID` instance. + - `uuid_representation`: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + See :ref:`handling-uuid-data-example` for details. + + .. versionadded:: 3.11 + """ + if not isinstance(uuid, UUID): + raise TypeError("uuid must be an instance of uuid.UUID") + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError( + "cannot encode native uuid.UUID with " + "UuidRepresentation.UNSPECIFIED. UUIDs can be manually " + "converted to bson.Binary instances using " + "bson.Binary.from_uuid() or a different UuidRepresentation " + "can be configured. See the documentation for " + "UuidRepresentation for more information." + ) + + subtype = OLD_UUID_SUBTYPE + if uuid_representation == UuidRepresentation.PYTHON_LEGACY: + payload = uuid.bytes + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + from_uuid = uuid.bytes + payload = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + payload = uuid.bytes_le + else: + # uuid_representation == UuidRepresentation.STANDARD + subtype = UUID_SUBTYPE + payload = uuid.bytes + + return cls(payload, subtype) + + def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUID: + """Create a Python UUID from this BSON Binary object. 
+ + Decodes this binary object as a native :class:`uuid.UUID` instance + with the provided ``uuid_representation``. + + Raises :exc:`ValueError` if this :class:`~bson.binary.Binary` instance + does not contain a UUID. + + :Parameters: + - `uuid_representation`: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + See :ref:`handling-uuid-data-example` for details. + + .. versionadded:: 3.11 """ + if self.subtype not in ALL_UUID_SUBTYPES: + raise ValueError(f"cannot decode subtype {self.subtype} as a uuid") + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError("uuid_representation cannot be UNSPECIFIED") + elif uuid_representation == UuidRepresentation.PYTHON_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self) + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self[0:8][::-1] + self[8:16][::-1]) + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes_le=self) + else: + # uuid_representation == UuidRepresentation.STANDARD + if self.subtype == UUID_SUBTYPE: + return UUID(bytes=self) + + raise ValueError( + f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" + ) + + @property + def subtype(self) -> int: + """Subtype of this binary data.""" return self.__subtype - def __getnewargs__(self): + def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 - data = super(Binary, self).__getnewargs__()[0] - if PY3 and not isinstance(data, bytes): - data = data.encode('latin-1') + data = super().__getnewargs__()[0] + if not isinstance(data, bytes): + data = data.encode("latin-1") return data, self.__subtype - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Binary): - return ((self.__subtype, bytes(self)) == - (other.subtype, bytes(other))) + return (self.__subtype, bytes(self)) == (other.subtype, bytes(other)) # We don't return NotImplemented here because if we did then # Binary("foo") == "foo" would return True, since Binary is a # subclass of str... return False - def __hash__(self): - return super(Binary, self).__hash__() ^ hash(self.__subtype) + def __hash__(self) -> int: + return super().__hash__() ^ hash(self.__subtype) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "Binary(%s, %s)" % (bytes.__repr__(self), self.__subtype) - - -class UUIDLegacy(Binary): - """UUID wrapper to support working with UUIDs stored as PYTHON_LEGACY. - - .. doctest:: - - >>> import uuid - >>> from bson.binary import Binary, UUIDLegacy, STANDARD - >>> from bson.codec_options import CodecOptions - >>> my_uuid = uuid.uuid4() - >>> coll = db.get_collection('test', - ... 
CodecOptions(uuid_representation=STANDARD)) - >>> coll.insert_one({'uuid': Binary(my_uuid.bytes, 3)}).inserted_id - ObjectId('...') - >>> coll.count_documents({'uuid': my_uuid}) - 0 - >>> coll.count_documents({'uuid': UUIDLegacy(my_uuid)}) - 1 - >>> coll.find({'uuid': UUIDLegacy(my_uuid)})[0]['uuid'] - UUID('...') - >>> - >>> # Convert from subtype 3 to subtype 4 - >>> doc = coll.find_one({'uuid': UUIDLegacy(my_uuid)}) - >>> coll.replace_one({"_id": doc["_id"]}, doc).matched_count - 1 - >>> coll.count_documents({'uuid': UUIDLegacy(my_uuid)}) - 0 - >>> coll.count_documents({'uuid': {'$in': [UUIDLegacy(my_uuid), my_uuid]}}) - 1 - >>> coll.find_one({'uuid': my_uuid})['uuid'] - UUID('...') - - Raises TypeError if `obj` is not an instance of :class:`~uuid.UUID`. - - :Parameters: - - `obj`: An instance of :class:`~uuid.UUID`. - """ - - def __new__(cls, obj): - if not isinstance(obj, UUID): - raise TypeError("obj must be an instance of uuid.UUID") - self = Binary.__new__(cls, obj.bytes, OLD_UUID_SUBTYPE) - self.__uuid = obj - return self - - def __getnewargs__(self): - # Support copy and deepcopy - return (self.__uuid,) - - @property - def uuid(self): - """UUID instance wrapped by this UUIDLegacy instance. - """ - return self.__uuid - - def __repr__(self): - return "UUIDLegacy('%s')" % self.__uuid + def __repr__(self) -> str: + return f"Binary({bytes.__repr__(self)}, {self.__subtype})" diff --git a/bson/bson-endian.h b/bson/bson-endian.h index c34a58dde1..e906b0776f 100644 --- a/bson/bson-endian.h +++ b/bson/bson-endian.h @@ -25,7 +25,6 @@ #ifdef _MSC_VER -# include "bson-stdint-win32.h" # define BSON_INLINE __inline #else # include diff --git a/bson/bson-stdint-win32.h b/bson/bson-stdint-win32.h deleted file mode 100644 index cb2acd9384..0000000000 --- a/bson/bson-stdint-win32.h +++ /dev/null @@ -1,259 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2013 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the product nor the names of its contributors may -// be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
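
Stepping back to the Binary changes above: the new from_uuid()/as_uuid() pair replaces the removed UUIDLegacy wrapper for moving between uuid.UUID and BSON binary. A round-trip sketch using the representations defined in this patch:

import uuid

from bson.binary import Binary, UuidRepresentation

u = uuid.uuid4()

# Standard representation: RFC-4122 byte order, binary subtype 4.
standard = Binary.from_uuid(u, UuidRepresentation.STANDARD)
assert standard.subtype == 4
assert standard.as_uuid(UuidRepresentation.STANDARD) == u

# Java legacy representation: subtype 3 with each 8-byte half reversed.
java = Binary.from_uuid(u, UuidRepresentation.JAVA_LEGACY)
assert java.subtype == 3
assert java.as_uuid(UuidRepresentation.JAVA_LEGACY) == u

Note that from_uuid() refuses UuidRepresentation.UNSPECIFIED outright, forcing applications to pick an explicit byte order before any bytes hit the wire.
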
-// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#if _MSC_VER >= 1600 // [ -#include -#else // ] _MSC_VER >= 1600 [ - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX 
-#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with . -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#endif // _MSC_VER >= 1600 ] - -#endif // _MSC_STDINT_H_ ] diff --git a/bson/buffer.c b/bson/buffer.c index c60fc44649..cc75202746 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -14,6 +14,10 @@ * limitations under the License. */ +/* Include Python.h so we can set Python's error indicator. */ +#define PY_SSIZE_T_CLEAN +#include "Python.h" + #include #include @@ -27,12 +31,19 @@ struct buffer { int position; }; +/* Set Python's error indicator to MemoryError. + * Called after allocation failures. */ +static void set_memory_error(void) { + PyErr_NoMemory(); +} + /* Allocate and return a new buffer. - * Return NULL on allocation failure. */ -buffer_t buffer_new(void) { + * Return NULL and sets MemoryError on allocation failure. 
*/ +buffer_t pymongo_buffer_new(void) { buffer_t buffer; buffer = (buffer_t)malloc(sizeof(struct buffer)); if (buffer == NULL) { + set_memory_error(); return NULL; } @@ -41,6 +52,7 @@ buffer_t buffer_new(void) { buffer->buffer = (char*)malloc(sizeof(char) * INITIAL_BUFFER_SIZE); if (buffer->buffer == NULL) { free(buffer); + set_memory_error(); return NULL; } @@ -49,17 +61,20 @@ buffer_t buffer_new(void) { /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer) { +int pymongo_buffer_free(buffer_t buffer) { if (buffer == NULL) { return 1; } - free(buffer->buffer); + /* Buffer will be NULL when buffer_grow fails. */ + if (buffer->buffer != NULL) { + free(buffer->buffer); + } free(buffer); return 0; } /* Grow `buffer` to at least `min_length`. - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. */ static int buffer_grow(buffer_t buffer, int min_length) { int old_size = 0; int size = buffer->size; @@ -79,7 +94,7 @@ static int buffer_grow(buffer_t buffer, int min_length) { buffer->buffer = (char*)realloc(buffer->buffer, sizeof(char) * size); if (buffer->buffer == NULL) { free(old_buffer); - free(buffer); + set_memory_error(); return 1; } buffer->size = size; @@ -87,17 +102,27 @@ static int buffer_grow(buffer_t buffer, int min_length) { } /* Assure that `buffer` has at least `size` free bytes (and grow if needed). - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. + * Return non-zero and sets ValueError if `size` would exceed 2GiB. */ static int buffer_assure_space(buffer_t buffer, int size) { - if (buffer->position + size <= buffer->size) { + int new_size = buffer->position + size; + /* Check for overflow. */ + if (new_size < buffer->position) { + PyErr_SetString(PyExc_ValueError, + "Document would overflow BSON size limit"); + return 1; + } + + if (new_size <= buffer->size) { return 0; } - return buffer_grow(buffer, buffer->position + size); + return buffer_grow(buffer, new_size); } /* Save `size` bytes from the current position in `buffer` (and grow if needed). - * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size) { + * Return offset for writing, or -1 on failure. + * Sets MemoryError or ValueError on failure. */ +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size) { int position = buffer->position; if (buffer_assure_space(buffer, size) != 0) { return -1; @@ -107,8 +132,9 @@ buffer_position buffer_save_space(buffer_t buffer, int size) { } /* Write `size` bytes from `data` to `buffer` (and grow if needed). - * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size) { + * Return non-zero on failure. + * Sets MemoryError or ValueError on failure. */ +int pymongo_buffer_write(buffer_t buffer, const char* data, int size) { if (buffer_assure_space(buffer, size) != 0) { return 1; } @@ -118,29 +144,14 @@ int buffer_write(buffer_t buffer, const char* data, int size) { return 0; } -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. 
*/ -int buffer_write_at_position(buffer_t buffer, buffer_position position, - const char* data, int size) { - if (position + size > buffer->size) { - buffer_free(buffer); - return 1; - } - - memcpy(buffer->buffer + position, data, size); - return 0; -} - - -int buffer_get_position(buffer_t buffer) { +int pymongo_buffer_get_position(buffer_t buffer) { return buffer->position; } -char* buffer_get_buffer(buffer_t buffer) { +char* pymongo_buffer_get_buffer(buffer_t buffer) { return buffer->buffer; } -void buffer_update_position(buffer_t buffer, buffer_position new_position) { +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position) { buffer->position = new_position; } diff --git a/bson/buffer.h b/bson/buffer.h index 96b1d0f837..a78e34e4de 100644 --- a/bson/buffer.h +++ b/bson/buffer.h @@ -27,30 +27,25 @@ typedef int buffer_position; /* Allocate and return a new buffer. * Return NULL on allocation failure. */ -buffer_t buffer_new(void); +buffer_t pymongo_buffer_new(void); /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer); +int pymongo_buffer_free(buffer_t buffer); /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size); +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size); /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size); - -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, const char* data, int size); +int pymongo_buffer_write(buffer_t buffer, const char* data, int size); /* Getters for the internals of a buffer_t. * Should try to avoid using these as much as possible * since they break the abstraction. */ -buffer_position buffer_get_position(buffer_t buffer); -char* buffer_get_buffer(buffer_t buffer); -void buffer_update_position(buffer_t buffer, buffer_position new_position); +buffer_position pymongo_buffer_get_position(buffer_t buffer); +char* pymongo_buffer_get_buffer(buffer_t buffer); +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position); #endif diff --git a/bson/code.py b/bson/code.py index 3f6e504350..689cda4acd 100644 --- a/bson/code.py +++ b/bson/code.py @@ -12,18 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing JavaScript code in BSON. -""" +"""Tools for representing JavaScript code in BSON.""" +from __future__ import annotations -from bson.py3compat import abc, string_type, PY3, text_type +from collections.abc import Mapping as _Mapping +from typing import Any, Mapping, Optional, Type, Union class Code(str): """BSON's JavaScript code type. Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3) or `scope` - is not ``None`` or an instance of :class:`dict`. + :class:`str` or `scope` is not ``None`` or an instance + of :class:`dict`. Scope variables can be set by passing a dictionary as the `scope` argument or by using keyword arguments. 
If a variable is set as a @@ -47,53 +48,54 @@ class Code(str): """ _type_marker = 13 + __scope: Union[Mapping[str, Any], None] - def __new__(cls, code, scope=None, **kwargs): - if not isinstance(code, string_type): - raise TypeError("code must be an " - "instance of %s" % (string_type.__name__)) + def __new__( + cls: Type[Code], + code: Union[str, Code], + scope: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Code: + if not isinstance(code, str): + raise TypeError("code must be an instance of str") - if not PY3 and isinstance(code, text_type): - self = str.__new__(cls, code.encode('utf8')) - else: - self = str.__new__(cls, code) + self = str.__new__(cls, code) try: - self.__scope = code.scope + self.__scope = code.scope # type: ignore except AttributeError: self.__scope = None if scope is not None: - if not isinstance(scope, abc.Mapping): + if not isinstance(scope, _Mapping): raise TypeError("scope must be an instance of dict") if self.__scope is not None: - self.__scope.update(scope) + self.__scope.update(scope) # type: ignore else: self.__scope = scope if kwargs: if self.__scope is not None: - self.__scope.update(kwargs) + self.__scope.update(kwargs) # type: ignore else: self.__scope = kwargs return self @property - def scope(self): - """Scope dictionary for this instance or ``None``. - """ + def scope(self) -> Optional[Mapping[str, Any]]: + """Scope dictionary for this instance or ``None``.""" return self.__scope - def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + def __repr__(self) -> str: + return f"Code({str.__repr__(self)}, {self.__scope!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Code): return (self.__scope, str(self)) == (other.__scope, str(other)) return False - __hash__ = None + __hash__: Any = None - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/codec_options.py b/bson/codec_options.py index 471d695a98..2c64c64600 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -13,29 +13,44 @@ # limitations under the License. """Tools for specifying BSON codec options.""" +from __future__ import annotations +import abc import datetime - -from abc import abstractmethod -from collections import namedtuple - -from bson.py3compat import ABC, abc, abstractproperty, string_type - -from bson.binary import (ALL_UUID_REPRESENTATIONS, - PYTHON_LEGACY, - UUID_REPRESENTATION_NAMES) - +import enum +from collections.abc import MutableMapping as _MutableMapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Iterable, + Mapping, + NamedTuple, + Optional, + Tuple, + Type, + Union, + cast, +) + +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + UUID_REPRESENTATION_NAMES, + UuidRepresentation, +) +from bson.typings import _DocumentType _RAW_BSON_DOCUMENT_MARKER = 101 -def _raw_document_class(document_class): +def _raw_document_class(document_class: Any) -> bool: """Determine if a document_class is a RawBSONDocument class.""" - marker = getattr(document_class, '_type_marker', None) + marker = getattr(document_class, "_type_marker", None) return marker == _RAW_BSON_DOCUMENT_MARKER -class TypeEncoder(ABC): +class TypeEncoder(abc.ABC): """Base class for defining type codec classes which describe how a custom type can be transformed to one of the types BSON understands. @@ -44,18 +59,17 @@ class TypeEncoder(ABC): See :ref:`custom-type-type-codec` documentation for an example. 
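
As a concrete instance of the TypeEncoder/TypeDecoder interface described here, a TypeCodec mapping Python's Decimal to Decimal128 could look like the following sketch (DecimalCodec is illustrative, not part of this patch):

from decimal import Decimal

from bson.codec_options import CodecOptions, TypeCodec, TypeRegistry
from bson.decimal128 import Decimal128


class DecimalCodec(TypeCodec):
    python_type = Decimal   # Python type this codec encodes.
    bson_type = Decimal128  # BSON type this codec decodes.

    def transform_python(self, value: Decimal) -> Decimal128:
        return Decimal128(value)

    def transform_bson(self, value: Decimal128) -> Decimal:
        return value.to_decimal()


# Register the codec so encoding and decoding round-trip Decimal values.
codec_options = CodecOptions(type_registry=TypeRegistry([DecimalCodec()]))

Because TypeRegistry validates codecs at construction time, a codec whose python_type shadows a built-in BSON-encodable type raises TypeError immediately rather than silently changing driver behavior.
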
""" - @abstractproperty - def python_type(self): + + @abc.abstractproperty + def python_type(self) -> Any: """The Python type to be converted into something serializable.""" - pass - @abstractmethod - def transform_python(self, value): + @abc.abstractmethod + def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" - pass -class TypeDecoder(ABC): +class TypeDecoder(abc.ABC): """Base class for defining type codec classes which describe how a BSON type can be transformed to a custom type. @@ -64,15 +78,14 @@ class TypeDecoder(ABC): See :ref:`custom-type-type-codec` documentation for an example. """ - @abstractproperty - def bson_type(self): + + @abc.abstractproperty + def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" - pass - @abstractmethod - def transform_bson(self, value): + @abc.abstractmethod + def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" - pass class TypeCodec(TypeEncoder, TypeDecoder): @@ -87,10 +100,13 @@ class TypeCodec(TypeEncoder, TypeDecoder): See :ref:`custom-type-type-codec` documentation for an example. """ - pass -class TypeRegistry(object): +_Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +_Fallback = Callable[[Any], Any] + + +class TypeRegistry: """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after instantiation. @@ -116,16 +132,20 @@ class TypeRegistry(object): :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. """ - def __init__(self, type_codecs=None, fallback_encoder=None): + + def __init__( + self, + type_codecs: Optional[Iterable[_Codec]] = None, + fallback_encoder: Optional[_Fallback] = None, + ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder - self._encoder_map = {} - self._decoder_map = {} + self._encoder_map: dict[Any, Any] = {} + self._decoder_map: dict[Any, Any] = {} if self._fallback_encoder is not None: if not callable(fallback_encoder): - raise TypeError("fallback_encoder %r is not a callable" % ( - fallback_encoder)) + raise TypeError("fallback_encoder %r is not a callable" % (fallback_encoder)) for codec in self.__type_codecs: is_valid_codec = False @@ -138,197 +158,350 @@ def __init__(self, type_codecs=None, fallback_encoder=None): self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" % ( - TypeEncoder.__name__, TypeDecoder.__name__, - TypeCodec.__name__, codec)) + f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" + ) - def _validate_type_encoder(self, codec): + def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES + for pytype in _BUILT_IN_TYPES: - if issubclass(codec.python_type, pytype): - err_msg = ("TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % - (codec, pytype)) + if issubclass(cast(TypeCodec, codec).python_type, pytype): + err_msg = ( + "TypeEncoders cannot change how built-in types are " + f"encoded (encoder {codec} transforms type {pytype})" + ) raise TypeError(err_msg) - def __repr__(self): - return ('%s(type_codecs=%r, fallback_encoder=%r)' % ( - self.__class__.__name__, self.__type_codecs, - self._fallback_encoder)) + def __repr__(self) -> str: + return 
"{}(type_codecs={!r}, fallback_encoder={!r})".format( + self.__class__.__name__, + self.__type_codecs, + self._fallback_encoder, + ) - def __eq__(self, other): + def __eq__(self, other: Any) -> Any: if not isinstance(other, type(self)): return NotImplemented - return ((self._decoder_map == other._decoder_map) and - (self._encoder_map == other._encoder_map) and - (self._fallback_encoder == other._fallback_encoder)) - - -_options_base = namedtuple( - 'CodecOptions', - ('document_class', 'tz_aware', 'uuid_representation', - 'unicode_decode_error_handler', 'tzinfo', 'type_registry')) - - -class CodecOptions(_options_base): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :class:`~bson.binary.UUIDLegacy` for examples using the - `uuid_representation` option. + return ( + (self._decoder_map == other._decoder_map) + and (self._encoder_map == other._encoder_map) + and (self._fallback_encoder == other._fallback_encoder) + ) - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.PYTHON_LEGACY`. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', and 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). 
- The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. + +class DatetimeConversion(int, enum.Enum): + """Options for decoding BSON datetimes.""" + + DATETIME = 1 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`. + + BSON UTC datetimes that cannot be represented as a + :class:`~datetime.datetime` will raise an :class:`OverflowError` + or a :class:`ValueError`. + + .. versionadded:: 4.3 """ - def __new__(cls, document_class=dict, - tz_aware=False, uuid_representation=PYTHON_LEGACY, - unicode_decode_error_handler="strict", - tzinfo=None, type_registry=None): - if not (issubclass(document_class, abc.MutableMapping) or - _raw_document_class(document_class)): - raise TypeError("document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping") - if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") - if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value " - "from bson.binary.ALL_UUID_REPRESENTATIONS") - if not isinstance(unicode_decode_error_handler, (string_type, None)): - raise ValueError("unicode_decode_error_handler must be a string " - "or None") - if tzinfo is not None: - if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError( - "tzinfo must be an instance of datetime.tzinfo") - if not tz_aware: - raise ValueError( - "cannot specify tzinfo without also setting tz_aware=True") - - type_registry = type_registry or TypeRegistry() - - if not isinstance(type_registry, TypeRegistry): - raise TypeError("type_registry must be an instance of TypeRegistry") - - return tuple.__new__( - cls, (document_class, tz_aware, uuid_representation, - unicode_decode_error_handler, tzinfo, type_registry)) - - def _arguments_repr(self): - """Representation of the arguments used to create this object.""" - document_class_repr = ( - 'dict' if self.document_class is dict - else repr(self.document_class)) - - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation, - self.uuid_representation) - - return ('document_class=%s, tz_aware=%r, uuid_representation=%s, ' - 'unicode_decode_error_handler=%r, tzinfo=%r, ' - 'type_registry=%r' % - (document_class_repr, self.tz_aware, uuid_rep_repr, - self.unicode_decode_error_handler, self.tzinfo, - self.type_registry)) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) - - def with_options(self, **kwargs): - """Make a copy of this CodecOptions, overriding some options:: - - >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS - >>> DEFAULT_CODEC_OPTIONS.tz_aware - False - >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) - >>> options.tz_aware - True - - .. versionadded:: 3.5 - """ - return CodecOptions( - kwargs.get('document_class', self.document_class), - kwargs.get('tz_aware', self.tz_aware), - kwargs.get('uuid_representation', self.uuid_representation), - kwargs.get('unicode_decode_error_handler', - self.unicode_decode_error_handler), - kwargs.get('tzinfo', self.tzinfo), - kwargs.get('type_registry', self.type_registry) - ) + DATETIME_CLAMP = 2 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`, clamping + to :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`. + + .. versionadded:: 4.3 + """ + + DATETIME_MS = 3 + """Decode a BSON UTC datetime as a :class:`~bson.datetime_ms.DatetimeMS` + object. + .. versionadded:: 4.3 + """ -DEFAULT_CODEC_OPTIONS = CodecOptions() + DATETIME_AUTO = 4 + """Decode a BSON UTC datetime as a :class:`datetime.datetime` if possible, + and a :class:`~bson.datetime_ms.DatetimeMS` if not. + .. versionadded:: 4.3 + """ -def _parse_codec_options(options): + +class _BaseCodecOptions(NamedTuple): + document_class: Type[Mapping[str, Any]] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: str + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[DatetimeConversion] + + +if TYPE_CHECKING: + + class CodecOptions(Tuple[_DocumentType], Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[int] + + def __new__( + cls: Type[CodecOptions[_DocumentType]], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., + ) -> CodecOptions[_DocumentType]: + ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> CodecOptions[Any]: + ... + + def _arguments_repr(self) -> str: + ... + + def _options_dict(self) -> dict[Any, Any]: + ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable[Any]) -> CodecOptions[_DocumentType]: + ... + + def _asdict(self) -> dict[str, Any]: + ... + + def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: + ... + + _source: str + _fields: Tuple[str, ...] + +else: + + class CodecOptions(_BaseCodecOptions): + """Encapsulates options used when encoding and/or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used when encoding and/or decoding BSON. + + The `document_class` option is used to define a custom type for use + in decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See :doc:`/examples/datetimes` for examples using the `tz_aware` and + `tzinfo` options. + + See :doc:`/examples/uuid` for examples using the `uuid_representation` + option. + + :Parameters: + - `document_class`: BSON documents returned in queries will be decoded + to an instance of this class. 
Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + - `uuid_representation`: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + - `type_registry`: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + super().__init__() + + def __new__( + cls: Type[CodecOptions], + document_class: Optional[Type[Mapping[str, Any]]] = None, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: str = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, + ) -> CodecOptions: + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. 
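A minimal sketch of the corner case that comment describes, using `SON[str, Any]` (the same example the comment names) — the generic alias is not itself a class, so the code falls back to its ``__origin__``:

>>> from typing import Any
>>> from bson.son import SON
>>> issubclass(SON[str, Any], dict)
Traceback (most recent call last):
    ...
TypeError: issubclass() arg 1 must be a class
>>> SON[str, Any].__origin__
<class 'bson.son.SON'>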
+ is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) + if not (is_mapping or _raw_document_class(doc_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.abc.MutableMapping" + ) + if not isinstance(tz_aware, bool): + raise TypeError(f"tz_aware must be True or False, was: tz_aware={tz_aware}") + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + if not isinstance(unicode_decode_error_handler, str): + raise ValueError("unicode_decode_error_handler must be a string") + if tzinfo is not None: + if not isinstance(tzinfo, datetime.tzinfo): + raise TypeError("tzinfo must be an instance of datetime.tzinfo") + if not tz_aware: + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") + + type_registry = type_registry or TypeRegistry() + + if not isinstance(type_registry, TypeRegistry): + raise TypeError("type_registry must be an instance of TypeRegistry") + + return tuple.__new__( + cls, + ( + doc_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + datetime_conversion, + ), + ) + + def _arguments_repr(self) -> str: + """Representation of the arguments used to create this object.""" + document_class_repr = ( + "dict" if self.document_class is dict else repr(self.document_class) + ) + + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + + return ( + "document_class={}, tz_aware={!r}, uuid_representation={}, " + "unicode_decode_error_handler={!r}, tzinfo={!r}, " + "type_registry={!r}, datetime_conversion={!s}".format( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + self.datetime_conversion, + ) + ) + + def _options_dict(self) -> dict[str, Any]: + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, + } + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._arguments_repr()})" + + def with_options(self, **kwargs: Any) -> CodecOptions: + """Make a copy of this CodecOptions, overriding some options:: + + >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS + >>> DEFAULT_CODEC_OPTIONS.tz_aware + False + >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) + >>> options.tz_aware + True + + .. 
versionadded:: 3.5 + """ + opts = self._options_dict() + opts.update(kwargs) + return CodecOptions(**opts) + + +DEFAULT_CODEC_OPTIONS: CodecOptions[dict[str, Any]] = CodecOptions() + + +def _parse_codec_options(options: Any) -> CodecOptions[Any]: """Parse BSON codec options.""" - return CodecOptions( - document_class=options.get( - 'document_class', DEFAULT_CODEC_OPTIONS.document_class), - tz_aware=options.get( - 'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware), - uuid_representation=options.get( - 'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation), - unicode_decode_error_handler=options.get( - 'unicode_decode_error_handler', - DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler), - tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo), - type_registry=options.get( - 'type_registry', DEFAULT_CODEC_OPTIONS.type_registry)) + kwargs = {} + for k in set(options) & { + "document_class", + "tz_aware", + "uuidrepresentation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + "datetime_conversion", + }: + if k == "uuidrepresentation": + kwargs["uuid_representation"] = options[k] + else: + kwargs[k] = options[k] + return CodecOptions(**kwargs) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py new file mode 100644 index 0000000000..b6aebd05d0 --- /dev/null +++ b/bson/datetime_ms.py @@ -0,0 +1,173 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for representing the BSON datetime type. + +.. versionadded:: 4.3 +""" +from __future__ import annotations + +import calendar +import datetime +import functools +from typing import Any, Union, cast + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion +from bson.errors import InvalidBSON +from bson.tz_util import utc + +EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) +EPOCH_NAIVE = EPOCH_AWARE.replace(tzinfo=None) +_DATETIME_ERROR_SUGGESTION = ( + "(Consider using CodecOptions(datetime_conversion=DATETIME_AUTO)" + " or MongoClient(datetime_conversion='DATETIME_AUTO'))." + " See: https://pymongo.readthedocs.io/en/stable/examples/datetimes.html#handling-out-of-range-datetimes" +) + + +class DatetimeMS: + """Represents a BSON UTC datetime.""" + + __slots__ = ("_value",) + + def __init__(self, value: Union[int, datetime.datetime]): + """Represents a BSON UTC datetime. + + BSON UTC datetimes are defined as an int64 of milliseconds since the + Unix epoch. The principal use of DatetimeMS is to represent + datetimes outside the range of the Python builtin + :class:`~datetime.datetime` class when + encoding/decoding BSON. + + To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in + :class:`~bson.CodecOptions` must be set to 'datetime_ms' or + 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for + details. + + :Parameters: + - `value`: An instance of :class:`datetime.datetime` to be + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. 
+ """ + if isinstance(value, int): + if not (-(2**63) <= value <= 2**63 - 1): + raise OverflowError("Must be a 64-bit integer of milliseconds") + self._value = value + elif isinstance(value, datetime.datetime): + self._value = _datetime_to_millis(value) + else: + raise TypeError(f"{type(value)} is not a valid type for DatetimeMS") + + def __hash__(self) -> int: + return hash(self._value) + + def __repr__(self) -> str: + return type(self).__name__ + "(" + str(self._value) + ")" + + def __lt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value < other + + def __le__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value <= other + + def __eq__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value == other._value + return False + + def __ne__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value != other._value + return True + + def __gt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value > other + + def __ge__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value >= other + + _type_marker = 9 + + def as_datetime( + self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS + ) -> datetime.datetime: + """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. + + :Parameters: + - `codec_options`: A CodecOptions instance for specifying how the + resulting DatetimeMS object will be formatted using ``tz_aware`` + and ``tz_info``. Defaults to + :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. + """ + return cast(datetime.datetime, _millis_to_datetime(self._value, codec_options)) + + def __int__(self) -> int: + return self._value + + +# Inclusive and exclusive min and max for timezones. +# Timezones are hashed by their offset, which is a timedelta +# and therefore there are more than 24 possible timezones. 
+@functools.lru_cache(maxsize=None) +def _min_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: + return _datetime_to_millis(datetime.datetime.min.replace(tzinfo=tz)) + + +@functools.lru_cache(maxsize=None) +def _max_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: + return _datetime_to_millis(datetime.datetime.max.replace(tzinfo=tz)) + + +def _millis_to_datetime( + millis: int, opts: CodecOptions[Any] +) -> Union[datetime.datetime, DatetimeMS]: + """Convert milliseconds since epoch UTC to datetime.""" + if ( + opts.datetime_conversion == DatetimeConversion.DATETIME + or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO + ): + tz = opts.tzinfo or datetime.timezone.utc + if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: + millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) + elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: + if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): + return DatetimeMS(millis) + + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) // 1000 + micros = diff * 1000 + + try: + if opts.tz_aware: + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) + if opts.tzinfo: + dt = dt.astimezone(tz) + return dt + else: + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) + except ArithmeticError as err: + raise InvalidBSON(f"{err} {_DATETIME_ERROR_SUGGESTION}") from err + + elif opts.datetime_conversion == DatetimeConversion.DATETIME_MS: + return DatetimeMS(millis) + else: + raise ValueError("datetime_conversion must be an element of DatetimeConversion") + + +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) diff --git a/bson/dbref.py b/bson/dbref.py index 3ec5463492..50fcf6c02f 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -13,28 +13,39 @@ # limitations under the License. """Tools for manipulating DBRefs (references to MongoDB documents).""" +from __future__ import annotations from copy import deepcopy +from typing import Any, Mapping, Optional -from bson.py3compat import iteritems, string_type +from bson._helpers import _getstate_slots, _setstate_slots from bson.son import SON -class DBRef(object): - """A reference to a document stored in MongoDB. - """ +class DBRef: + """A reference to a document stored in MongoDB.""" + __slots__ = "__collection", "__id", "__database", "__kwargs" + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection, id, database=None, _extra={}, **kwargs): + def __init__( + self, + collection: str, + id: Any, + database: Optional[str] = None, + _extra: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not - an instance of :class:`basestring` (:class:`str` in python 3). - `database` is optional and allows references to documents to work - across databases. Any additional keyword arguments will create - additional fields in the resultant embedded document. + an instance of :class:`str`. 
`database` is optional and allows + references to documents to work across databases. Any additional + keyword arguments will create additional fields in the resultant + embedded document. :Parameters: - `collection`: name of the collection the document is stored in @@ -43,93 +54,81 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): - `**kwargs` (optional): additional keyword arguments will create additional, custom fields - .. mongodoc:: dbrefs + .. seealso:: The MongoDB documentation on `dbrefs `_. """ - if not isinstance(collection, string_type): - raise TypeError("collection must be an " - "instance of %s" % string_type.__name__) - if database is not None and not isinstance(database, string_type): - raise TypeError("database must be an " - "instance of %s" % string_type.__name__) + if not isinstance(collection, str): + raise TypeError("collection must be an instance of str") + if database is not None and not isinstance(database, str): + raise TypeError("database must be an instance of str") self.__collection = collection self.__id = id self.__database = database - kwargs.update(_extra) + kwargs.update(_extra or {}) self.__kwargs = kwargs @property - def collection(self): - """Get the name of this DBRef's collection as unicode. - """ + def collection(self) -> str: + """Get the name of this DBRef's collection.""" return self.__collection @property - def id(self): - """Get this DBRef's _id. - """ + def id(self) -> Any: + """Get this DBRef's _id.""" return self.__id @property - def database(self): + def database(self) -> Optional[str]: """Get the name of this DBRef's database. Returns None if this DBRef doesn't specify a database. """ return self.__database - def __getattr__(self, key): + def __getattr__(self, key: Any) -> Any: try: return self.__kwargs[key] except KeyError: - raise AttributeError(key) - - # Have to provide __setstate__ to avoid - # infinite recursion since we override - # __getattr__. - def __setstate__(self, state): - self.__dict__.update(state) + raise AttributeError(key) from None - def as_doc(self): + def as_doc(self) -> SON[str, Any]: """Get the SON document representation of this DBRef. 
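For instance (collection name, id, and extra field invented for illustration):

>>> from bson.dbref import DBRef
>>> ref = DBRef("users", 42, database="app", owner="alice")
>>> ref.as_doc()
SON([('$ref', 'users'), ('$id', 42), ('$db', 'app'), ('owner', 'alice')])
>>> ref.owner
'alice'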
Generally not needed by application developers """ - doc = SON([("$ref", self.collection), - ("$id", self.id)]) + doc = SON([("$ref", self.collection), ("$id", self.id)]) if self.database is not None: doc["$db"] = self.database doc.update(self.__kwargs) return doc - def __repr__(self): - extra = "".join([", %s=%r" % (k, v) - for k, v in iteritems(self.__kwargs)]) + def __repr__(self) -> str: + extra = "".join([f", {k}={v!r}" for k, v in self.__kwargs.items()]) if self.database is None: - return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, - self.database, extra) + return f"DBRef({self.collection!r}, {self.id!r}{extra})" + return f"DBRef({self.collection!r}, {self.id!r}, {self.database!r}{extra})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): - us = (self.__database, self.__collection, - self.__id, self.__kwargs) - them = (other.__database, other.__collection, - other.__id, other.__kwargs) + us = (self.__database, self.__collection, self.__id, self.__kwargs) + them = (other.__database, other.__collection, other.__id, other.__kwargs) return us == them return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`DBRef`.""" - return hash((self.__collection, self.__id, self.__database, - tuple(sorted(self.__kwargs.items())))) + return hash( + (self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items()))) + ) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> DBRef: """Support function for `copy.deepcopy()`.""" - return DBRef(deepcopy(self.__collection, memo), - deepcopy(self.__id, memo), - deepcopy(self.__database, memo), - deepcopy(self.__kwargs, memo)) + return DBRef( + deepcopy(self.__collection, memo), + deepcopy(self.__id, memo), + deepcopy(self.__database, memo), + deepcopy(self.__kwargs, memo), + ) diff --git a/bson/decimal128.py b/bson/decimal128.py index 0c0fc10c67..f807452a6c 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -15,26 +15,12 @@ """Tools for working with the BSON decimal128 type. .. versionadded:: 3.4 - -.. note:: The Decimal128 BSON type requires MongoDB 3.4+. """ +from __future__ import annotations import decimal import struct -import sys - -from bson.py3compat import (PY3 as _PY3, - string_type as _string_type) - - -if _PY3: - _from_bytes = int.from_bytes # pylint: disable=no-member, invalid-name -else: - import binascii - def _from_bytes(value, dummy, _int=int, _hexlify=binascii.hexlify): - "An implementation of int.from_bytes for python 2.x." - return _int(_hexlify(value), 16) - +from typing import Any, Sequence, Tuple, Type, Union _PACK_64 = struct.Struct("<Q").pack _UNPACK_64 = struct.Struct("<Q").unpack -try: - # Python >= 3.3, cdecimal - decimal.Context(clamp=1) # pylint: disable=unexpected-keyword-arg - _CTX_OPTIONS['clamp'] = 1 -except TypeError: - # Python < 3.3 - _CTX_OPTIONS['_clamp'] = 1 -_DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy()) +_DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy()) # type: ignore +_VALUE_OPTIONS = Union[decimal.Decimal, float, str, Tuple[int, Sequence[int], int]] -def create_decimal128_context(): +def create_decimal128_context() -> decimal.Context: """Returns an instance of :class:`decimal.Context` appropriate for working with IEEE-754 128-bit decimal floating point values. 
""" opts = _CTX_OPTIONS.copy() - opts['traps'] = [] - return decimal.Context(**opts) + opts["traps"] = [] + return decimal.Context(**opts) # type: ignore -def _decimal_to_128(value): +def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: """Converts a decimal.Decimal to BID (high bits, low bits). :Parameters: @@ -123,12 +101,12 @@ def _decimal_to_128(value): if significand & (1 << i): high |= 1 << (i - 64) - biased_exponent = exponent + _EXPONENT_BIAS + biased_exponent = exponent + _EXPONENT_BIAS # type: ignore[operator] if high >> 49 == 1: - high = high & 0x7fffffffffff + high = high & 0x7FFFFFFFFFFF high |= _EXPONENT_MASK - high |= (biased_exponent & 0x3fff) << 47 + high |= (biased_exponent & 0x3FFF) << 47 else: high |= biased_exponent << 49 @@ -138,7 +116,7 @@ def _decimal_to_128(value): return high, low -class Decimal128(object): +class Decimal128: """BSON Decimal128 type:: >>> Decimal128(Decimal("0.0005")) @@ -232,23 +210,26 @@ class Decimal128(object): >>> Decimal('NaN') == Decimal('NaN') False """ - __slots__ = ('__high', '__low') + + __slots__ = ("__high", "__low") _type_marker = 19 - def __init__(self, value): - if isinstance(value, (_string_type, decimal.Decimal)): + def __init__(self, value: _VALUE_OPTIONS) -> None: + if isinstance(value, (str, decimal.Decimal)): self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): if len(value) != 2: - raise ValueError('Invalid size for creation of Decimal128 ' - 'from list or tuple. Must have exactly 2 ' - 'elements.') - self.__high, self.__low = value + raise ValueError( + "Invalid size for creation of Decimal128 " + "from list or tuple. Must have exactly 2 " + "elements." + ) + self.__high, self.__low = value # type: ignore else: - raise TypeError("Cannot convert %r to Decimal128" % (value,)) + raise TypeError(f"Cannot convert {value!r} to Decimal128") - def to_decimal(self): + def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`. """ @@ -257,25 +238,25 @@ def to_decimal(self): sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: - return decimal.Decimal((sign, (), 'N')) + return decimal.Decimal((sign, (), "N")) # type: ignore elif (high & _NAN) == _NAN: - return decimal.Decimal((sign, (), 'n')) + return decimal.Decimal((sign, (), "n")) # type: ignore elif (high & _INF) == _INF: - return decimal.Decimal((sign, (), 'F')) + return decimal.Decimal((sign, (), "F")) # type: ignore if (high & _EXPONENT_MASK) == _EXPONENT_MASK: - exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS + exponent = ((high & 0x1FFFE00000000000) >> 47) - _EXPONENT_BIAS return decimal.Decimal((sign, (0,), exponent)) else: - exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS + exponent = ((high & 0x7FFF800000000000) >> 49) - _EXPONENT_BIAS arr = bytearray(15) - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(14, 6, -1): arr[i] = (low & mask) >> ((14 - i) << 3) mask = mask << 8 - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(6, 0, -1): arr[i] = (high & mask) >> ((6 - i) << 3) mask = mask << 8 @@ -284,14 +265,13 @@ def to_decimal(self): arr[0] = (high & mask) >> 48 # cdecimal only accepts a tuple for digits. 
- digits = tuple( - int(digit) for digit in str(_from_bytes(arr, 'big'))) + digits = tuple(int(digit) for digit in str(int.from_bytes(arr, "big"))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent)) @classmethod - def from_bid(cls, value): + def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: """Create an instance of :class:`Decimal128` from Binary Integer Decimal string. @@ -303,33 +283,33 @@ def from_bid(cls, value): raise TypeError("value must be an instance of bytes") if len(value) != 16: raise ValueError("value must be exactly 16 bytes") - return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) + return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore @property - def bid(self): + def bid(self) -> bytes: """The Binary Integer Decimal (BID) encoding of this instance.""" return _PACK_64(self.__low) + _PACK_64(self.__high) - def __str__(self): + def __str__(self) -> str: dec = self.to_decimal() if dec.is_nan(): # Required by the drivers spec to match MongoDB behavior. return "NaN" return str(dec) - def __repr__(self): - return "Decimal128('%s')" % (str(self),) + def __repr__(self) -> str: + return f"Decimal128('{self!s}')" - def __setstate__(self, value): + def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value - def __getstate__(self): + def __getstate__(self) -> Tuple[int, int]: return self.__high, self.__low - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Decimal128): return self.bid == other.bid return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c deleted file mode 100644 index ea96810878..0000000000 --- a/bson/encoding_helpers.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "encoding_helpers.h" - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. 
- */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static unsigned char isLegalUTF8(const unsigned char* source, int length) { - unsigned char a; - const unsigned char* srcptr = source + length; - switch (length) { - default: return 0; - /* Everything else falls through when "true"... */ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 2: if ((a = (*--srcptr)) > 0xBF) return 0; - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return 0; break; - case 0xF0: if (a < 0x90) return 0; break; - case 0xF4: if ((a > 0x8F) || (a < 0x80)) return 0; break; - default: if (a < 0x80) return 0; - } - case 1: if (*source >= 0x80 && *source < 0xC2) return 0; - if (*source > 0xF4) return 0; - } - return 1; -} - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null) { - int position = 0; - /* By default we go character by character. Will be different for checking - * UTF-8 */ - int sequence_length = 1; - - if (!check_utf8 && !check_null) { - return VALID; - } - - while (position < length) { - if (check_null && *(string + position) == 0) { - return HAS_NULL; - } - if (check_utf8) { - sequence_length = trailingBytesForUTF8[*(string + position)] + 1; - if ((position + sequence_length) > length) { - return NOT_UTF_8; - } - if (!isLegalUTF8(string + position, sequence_length)) { - return NOT_UTF_8; - } - } - position += sequence_length; - } - - return VALID; -} diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h deleted file mode 100644 index b1a90fa510..0000000000 --- a/bson/encoding_helpers.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ENCODING_HELPERS_H -#define ENCODING_HELPERS_H - -typedef enum { - VALID, - NOT_UTF_8, - HAS_NULL -} result_t; - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null); - -#endif diff --git a/bson/errors.py b/bson/errors.py index 9bdb741371..a3699e704c 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -13,28 +13,24 @@ # limitations under the License. """Exceptions raised by the BSON package.""" +from __future__ import annotations class BSONError(Exception): - """Base class for all BSON exceptions. - """ + """Base class for all BSON exceptions.""" class InvalidBSON(BSONError): - """Raised when trying to create a BSON object from invalid data. - """ + """Raised when trying to create a BSON object from invalid data.""" class InvalidStringData(BSONError): - """Raised when trying to encode a string containing non-UTF8 data. - """ + """Raised when trying to encode a string containing non-UTF8 data.""" class InvalidDocument(BSONError): - """Raised when trying to create a BSON object from an invalid document. - """ + """Raised when trying to create a BSON object from an invalid document.""" class InvalidId(BSONError): - """Raised when trying to create an ObjectId from invalid data. - """ + """Raised when trying to create an ObjectId from invalid data.""" diff --git a/bson/int64.py b/bson/int64.py index 77e9812304..c0676839ab 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -13,14 +13,12 @@ # limitations under the License. """A BSON wrapper for long (int in python3)""" +from __future__ import annotations -from bson.py3compat import PY3 +from typing import Any -if PY3: - long = int - -class Int64(long): +class Int64(int): """Representation of the BSON int64 type. This is necessary because every integral number is an :class:`int` in @@ -31,4 +29,12 @@ class Int64(long): - `value`: the numeric value to represent """ + __slots__ = () + _type_marker = 18 + + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass diff --git a/bson/json_util.py b/bson/json_util.py index 35bdc3070d..1a74a81368 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -17,9 +17,9 @@ This module provides two helper methods `dumps` and `loads` that wrap the native :mod:`json` methods and provide explicit BSON conversion to and from JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON -is emitted and parsed, with the default being the legacy PyMongo format. -:mod:`~bson.json_util` can also generate Canonical or Relaxed `Extended JSON`_ -when :const:`CANONICAL_JSON_OPTIONS` or :const:`RELAXED_JSON_OPTIONS` is +is emitted and parsed, with the default being the Relaxed Extended JSON format. +:mod:`~bson.json_util` can also generate Canonical or legacy `Extended JSON`_ +when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is provided, respectively. .. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst @@ -29,20 +29,26 @@ .. doctest:: >>> from bson.json_util import loads - >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]') - [{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 128)}] + >>> loads( + ... 
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]' + ... ) + [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] -Example usage (serialization): +Example usage with :const:`RELAXED_JSON_OPTIONS` (the default): .. doctest:: >>> from bson import Binary, Code >>> from bson.json_util import dumps - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }", {})}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}]) - '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ] + ... ) + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`CANONICAL_JSON_OPTIONS`): @@ -50,25 +56,33 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=CANONICAL_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=CANONICAL_JSON_OPTIONS, + ... ) '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' -Example usage (with :const:`RELAXED_JSON_OPTIONS`): +Example usage (with :const:`LEGACY_JSON_OPTIONS`): .. doctest:: >>> from bson import Binary, Code - >>> from bson.json_util import dumps, RELAXED_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=RELAXED_JSON_OPTIONS) - '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' + >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }", {})}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=LEGACY_JSON_OPTIONS, + ... ) + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code` @@ -84,56 +98,49 @@ Extended JSON converter for Python built on top of `libbson `_. `python-bsonjs` works best with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`. - -.. versionchanged:: 2.8 - The output format for :class:`~bson.timestamp.Timestamp` has changed from - '{"t": , "i": }' to '{"$timestamp": {"t": , "i": }}'. 
- This new format will be decoded to an instance of - :class:`~bson.timestamp.Timestamp`. The old format will continue to be - decoded to a python dict as before. Encoding to the old format is no longer - supported as it was never correct and loses type information. - Added support for $numberLong and $undefined - new in MongoDB 2.6 - and - parsing $date in ISO-8601 format. - -.. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. - -.. versionchanged:: 2.3 - Added dumps and loads helpers to automatically handle conversion to and - from json and supports :class:`~bson.binary.Binary` and - :class:`~bson.code.Code` """ +from __future__ import annotations import base64 import datetime import json import math import re -import sys import uuid - -from pymongo.errors import ConfigurationError - -import bson -from bson import EPOCH_AWARE, EPOCH_NAIVE, RE_TYPE, SON -from bson.binary import (Binary, JAVA_LEGACY, CSHARP_LEGACY, OLD_UUID_SUBTYPE, - UUID_SUBTYPE) +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversion +from bson.datetime_ms import ( + EPOCH_AWARE, + DatetimeMS, + _datetime_to_millis, + _max_datetime_ms, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.py3compat import (PY3, iteritems, integer_types, string_type, - text_type) from bson.regex import Regex +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc - _RE_OPT_TABLE = { "i": re.I, "l": re.L, @@ -143,9 +150,6 @@ "x": re.X, } -# Dollar-prefixed keys which may appear in DBRefs. -_DBREF_KEYS = frozenset(['$id', '$ref', '$db']) - class DatetimeRepresentation: LEGACY = 0 @@ -226,94 +230,190 @@ class JSONMode: """ -class JSONOptions(CodecOptions): - """Encapsulates JSON options for :func:`dumps` and :func:`loads`. - - :Parameters: - - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects - are encoded to MongoDB Extended JSON's *Strict mode* type - `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they - will be encoded as an `int`. Defaults to ``False``. - - `datetime_representation`: The representation to use when encoding - instances of :class:`datetime.datetime`. Defaults to - :const:`~DatetimeRepresentation.LEGACY`. - - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to - MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it - will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. - - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to - Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. - - `document_class`: BSON documents returned by :func:`loads` will be - decoded to an instance of this class. Must be a subclass of - :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`uuid.UUID`. Defaults to - :const:`~bson.binary.PYTHON_LEGACY`. 
- - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type - `Date` will be decoded to timezone aware instances of - :class:`datetime.datetime`. Otherwise they will be naive. Defaults - to ``True``. - - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the - timezone from which :class:`~datetime.datetime` objects should be - decoded. Defaults to :const:`~bson.tz_util.utc`. - - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` - - .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. - - .. versionadded:: 3.4 - - .. versionchanged:: 3.5 - Accepts the optional parameter `json_mode`. - - """ - - def __new__(cls, strict_number_long=False, - datetime_representation=DatetimeRepresentation.LEGACY, - strict_uuid=False, json_mode=JSONMode.LEGACY, - *args, **kwargs): - kwargs["tz_aware"] = kwargs.get("tz_aware", True) +if TYPE_CHECKING: + _BASE_CLASS = CodecOptions[MutableMapping[str, Any]] +else: + _BASE_CLASS = CodecOptions + + +class JSONOptions(_BASE_CLASS): + json_mode: int + strict_number_long: bool + datetime_representation: int + strict_uuid: bool + document_class: Type[MutableMapping[str, Any]] + + def __init__(self, *args: Any, **kwargs: Any): + """Encapsulates JSON options for :func:`dumps` and :func:`loads`. + + :Parameters: + - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects + are encoded to MongoDB Extended JSON's *Strict mode* type + `NumberLong`, i.e. ``'{"$numberLong": "<number>" }'``. Otherwise they + will be encoded as an `int`. Defaults to ``False``. + - `datetime_representation`: The representation to use when encoding + instances of :class:`datetime.datetime`. Defaults to + :const:`~DatetimeRepresentation.LEGACY`. + - `strict_uuid`: If ``True``, :class:`uuid.UUID` objects are encoded to + MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise they + will be encoded as ``'{"$uuid": "<hex>" }'``. Defaults to ``False``. + - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to + Extended JSON. Defaults to :const:`~JSONMode.RELAXED`. + - `document_class`: BSON documents returned by :func:`loads` will be + decoded to an instance of this class. Must be a subclass of + :class:`collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type + `Date` will be decoded to timezone aware instances of + :class:`datetime.datetime`. Otherwise they will be naive. Defaults + to ``False``. + - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the + timezone from which :class:`~datetime.datetime` objects should be + decoded. Defaults to :const:`~bson.tz_util.utc`. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. 
+ - `args`: arguments to :class:`~bson.codec_options.CodecOptions` + - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` + + .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. + + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + The default for `tz_aware` was changed to ``False``. + + .. versionchanged:: 3.5 + Accepts the optional parameter `json_mode`. + """ + super().__init__() + + def __new__( + cls: Type[JSONOptions], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, + **kwargs: Any, + ) -> JSONOptions: + kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) - if datetime_representation not in (DatetimeRepresentation.LEGACY, - DatetimeRepresentation.NUMBERLONG, - DatetimeRepresentation.ISO8601): - raise ConfigurationError( + if datetime_representation not in ( + DatetimeRepresentation.LEGACY, + DatetimeRepresentation.NUMBERLONG, + DatetimeRepresentation.ISO8601, + None, + ): + raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " - "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") - self = super(JSONOptions, cls).__new__(cls, *args, **kwargs) - if json_mode not in (JSONMode.LEGACY, - JSONMode.RELAXED, - JSONMode.CANONICAL): - raise ConfigurationError( + "NUMBERLONG, or ISO8601 from DatetimeRepresentation." + ) + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) # type:ignore[arg-type] + if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): + raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " - "or CANONICAL from JSONMode.") + "or CANONICAL from JSONMode." + ) + self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: + if strict_number_long: + raise ValueError("Cannot specify strict_number_long=True with JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.ISO8601): + raise ValueError( + "datetime_representation must be DatetimeRepresentation." + "ISO8601 or omitted with JSONMode.RELAXED" + ) + if strict_uuid not in (None, True): + raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = False self.datetime_representation = DatetimeRepresentation.ISO8601 self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: + if strict_number_long not in (None, True): + raise ValueError("Cannot specify strict_number_long=False with JSONMode.CANONICAL") + if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG): + raise ValueError( + "datetime_representation must be DatetimeRepresentation." + "NUMBERLONG or omitted with JSONMode.CANONICAL" + ) + if strict_uuid not in (None, True): + raise ValueError("Cannot specify strict_uuid=False with JSONMode.CANONICAL") self.strict_number_long = True self.datetime_representation = DatetimeRepresentation.NUMBERLONG self.strict_uuid = True - else: - self.strict_number_long = strict_number_long - self.datetime_representation = datetime_representation - self.strict_uuid = strict_uuid + else: # JSONMode.LEGACY + self.strict_number_long = False + self.datetime_representation = DatetimeRepresentation.LEGACY + self.strict_uuid = False + if strict_number_long is not None: + self.strict_number_long = strict_number_long + if datetime_representation is not None: + self.datetime_representation = datetime_representation + if strict_uuid is not None: + self.strict_uuid = strict_uuid return self - def _arguments_repr(self): - return ('strict_number_long=%r, ' - 'datetime_representation=%r, ' - 'strict_uuid=%r, json_mode=%r, %s' % ( - self.strict_number_long, - self.datetime_representation, - self.strict_uuid, - self.json_mode, - super(JSONOptions, self)._arguments_repr())) - - -LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY) + def _arguments_repr(self) -> str: + return ( + "strict_number_long={!r}, " + "datetime_representation={!r}, " + "strict_uuid={!r}, json_mode={!r}, {}".format( + self.strict_number_long, + self.datetime_representation, + self.strict_uuid, + self.json_mode, + super()._arguments_repr(), + ) + ) + + def _options_dict(self) -> dict[Any, Any]: + # TODO: PYTHON-2442 use _asdict() instead + options_dict = super()._options_dict() + options_dict.update( + { + "strict_number_long": self.strict_number_long, + "datetime_representation": self.datetime_representation, + "strict_uuid": self.strict_uuid, + "json_mode": self.json_mode, + } + ) + return options_dict + + def with_options(self, **kwargs: Any) -> JSONOptions: + """ + Make a copy of this JSONOptions, overriding some options:: + + >>> from bson.json_util import CANONICAL_JSON_OPTIONS + >>> CANONICAL_JSON_OPTIONS.tz_aware + False + >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=True) + >>> json_options.tz_aware + True + + .. versionadded:: 3.12 + """ + opts = self._options_dict() + for opt in ("strict_number_long", "datetime_representation", "strict_uuid", "json_mode"): + opts[opt] = kwargs.get(opt, getattr(self, opt)) + opts.update(kwargs) + return JSONOptions(**opts) + + +LEGACY_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.LEGACY) """:class:`JSONOptions` for encoding to PyMongo's legacy JSON format. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`. @@ -321,16 +421,7 @@ def _arguments_repr(self): .. versionadded:: 3.5 """ -DEFAULT_JSON_OPTIONS = LEGACY_JSON_OPTIONS -"""The default :class:`JSONOptions` for JSON encoding/decoding. - -The same as :const:`LEGACY_JSON_OPTIONS`. This will change to -:const:`RELAXED_JSON_OPTIONS` in a future release. - -.. versionadded:: 3.4 -""" - -CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL) +CANONICAL_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.CANONICAL) """:class:`JSONOptions` for Canonical Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`. @@ -338,7 +429,7 @@ def _arguments_repr(self): .. 
versionadded:: 3.5 """ -RELAXED_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.RELAXED) +RELAXED_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.RELAXED) """:class:`JSONOptions` for Relaxed Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`. @@ -346,22 +437,20 @@ def _arguments_repr(self): .. versionadded:: 3.5 """ -STRICT_JSON_OPTIONS = JSONOptions( - strict_number_long=True, - datetime_representation=DatetimeRepresentation.ISO8601, - strict_uuid=True) -"""**DEPRECATED** - :class:`JSONOptions` for MongoDB Extended JSON's *Strict -mode* encoding. +DEFAULT_JSON_OPTIONS: JSONOptions = RELAXED_JSON_OPTIONS +"""The default :class:`JSONOptions` for JSON encoding/decoding. -.. versionadded:: 3.4 +The same as :const:`RELAXED_JSON_OPTIONS`. -.. versionchanged:: 3.5 - Deprecated. Use :const:`RELAXED_JSON_OPTIONS` or - :const:`CANONICAL_JSON_OPTIONS` instead. +.. versionchanged:: 4.0 + Changed from :const:`LEGACY_JSON_OPTIONS` to + :const:`RELAXED_JSON_OPTIONS`. + +.. versionadded:: 3.4 """ -def dumps(obj, *args, **kwargs): +def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: """Helper function that wraps :func:`json.dumps`. Recursive function that handles all BSON types including @@ -372,18 +461,18 @@ def dumps(obj, *args, **kwargs): encoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. + .. versionchanged:: 4.0 + Now outputs MongoDB Relaxed Extended JSON by default (using + :const:`DEFAULT_JSON_OPTIONS`). + .. versionchanged:: 3.4 Accepts optional parameter `json_options`. See :class:`JSONOptions`. - - .. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s, *args, **kwargs): +def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. @@ -396,6 +485,11 @@ def loads(s, *args, **kwargs): decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. + .. versionchanged:: 4.0 + Now loads :class:`datetime.datetime` instances as naive by default. To + load timezone aware instances utilize the `json_options` parameter. + See :ref:`tz_aware_default_change` for an example. + .. versionchanged:: 3.5 Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON @@ -405,34 +499,38 @@ def loads(s, *args, **kwargs): Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) - kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook( - pairs, json_options) + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) -def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): +def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: """Recursive helper method that converts BSON types so they can be converted into json. 
""" - if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support - return SON(((k, _json_convert(v, json_options)) - for k, v in iteritems(obj))) - elif hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)): - return list((_json_convert(v, json_options) for v in obj)) + if hasattr(obj, "items"): + return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): + return [_json_convert(v, json_options) for v in obj] try: return default(obj, json_options) except TypeError: return obj -def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS): - return object_hook(json_options.document_class(pairs), json_options) +def object_pairs_hook( + pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS +) -> Any: + return object_hook(json_options.document_class(pairs), json_options) # type:ignore[call-arg] -def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): +def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if "$oid" in dct: return _parse_canonical_oid(dct) - if "$ref" in dct: + if ( + isinstance(dct.get("$ref"), str) + and "$id" in dct + and isinstance(dct.get("$db"), (str, type(None))) + ): return _parse_canonical_dbref(dct) if "$date" in dct: return _parse_canonical_datetime(dct, json_options) @@ -450,7 +548,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): if "$code" in dct: return _parse_canonical_code(dct) if "$uuid" in dct: - return _parse_legacy_uuid(dct) + return _parse_legacy_uuid(dct, json_options) if "$undefined" in dct: return None if "$numberLong" in dct: @@ -473,10 +571,10 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): return dct -def _parse_legacy_regex(doc): +def _parse_legacy_regex(doc: Any) -> Any: pattern = doc["$regex"] # Check if this is the $regex query operator. - if isinstance(pattern, Regex): + if not isinstance(pattern, (str, bytes)): return doc flags = 0 # PyMongo always adds $options but some other tools may not. 
@@ -485,99 +583,110 @@ def _parse_legacy_regex(doc): return Regex(pattern, flags) -def _parse_legacy_uuid(doc): +def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: - raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) - return uuid.UUID(doc["$uuid"]) + raise TypeError(f"Bad $uuid, extra field(s): {doc}") + if not isinstance(doc["$uuid"], str): + raise TypeError(f"$uuid must be a string: {doc}") + if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: + return Binary.from_uuid(uuid.UUID(doc["$uuid"])) + else: + return uuid.UUID(doc["$uuid"]) -def _binary_or_uuid(data, subtype, json_options): +def _binary_or_uuid(data: Any, subtype: int, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: # special handling for UUID - if subtype == OLD_UUID_SUBTYPE: - if json_options.uuid_representation == CSHARP_LEGACY: - return uuid.UUID(bytes_le=data) - if json_options.uuid_representation == JAVA_LEGACY: - data = data[7::-1] + data[:7:-1] - return uuid.UUID(bytes=data) - if subtype == UUID_SUBTYPE: - return uuid.UUID(bytes=data) - if PY3 and subtype == 0: - return data + if subtype in ALL_UUID_SUBTYPES: + uuid_representation = json_options.uuid_representation + binary_value = Binary(data, subtype) + if uuid_representation == UuidRepresentation.UNSPECIFIED: + return binary_value + if subtype == UUID_SUBTYPE: + # Legacy behavior: use STANDARD with binary subtype 4. + uuid_representation = UuidRepresentation.STANDARD + elif uuid_representation == UuidRepresentation.STANDARD: + # subtype == OLD_UUID_SUBTYPE + # Legacy behavior: STANDARD is the same as PYTHON_LEGACY. + uuid_representation = UuidRepresentation.PYTHON_LEGACY + return binary_value.as_uuid(uuid_representation) + + if subtype == 0: + return cast(uuid.UUID, data) return Binary(data, subtype) -def _parse_legacy_binary(doc, json_options): +def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: if isinstance(doc["$type"], int): doc["$type"] = "%02x" % doc["$type"] subtype = int(doc["$type"], 16) - if subtype >= 0xffffff80: # Handle mongoexport values + if subtype >= 0xFFFFFF80: # Handle mongoexport values subtype = int(doc["$type"][6:], 16) data = base64.b64decode(doc["$binary"].encode()) return _binary_or_uuid(data, subtype, json_options) -def _parse_canonical_binary(doc, json_options): +def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: binary = doc["$binary"] b64 = binary["base64"] subtype = binary["subType"] - if not isinstance(b64, string_type): - raise TypeError('$binary base64 must be a string: %s' % (doc,)) - if not isinstance(subtype, string_type) or len(subtype) > 2: - raise TypeError('$binary subType must be a string at most 2 ' - 'characters: %s' % (doc,)) + if not isinstance(b64, str): + raise TypeError(f"$binary base64 must be a string: {doc}") + if not isinstance(subtype, str) or len(subtype) > 2: + raise TypeError(f"$binary subType must be a string at most 2 characters: {doc}") if len(binary) != 2: - raise TypeError('$binary must include only "base64" and "subType" ' - 'components: %s' % (doc,)) + raise TypeError(f'$binary must include only "base64" and "subType" components: {doc}') data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) -def _parse_canonical_datetime(doc, json_options): +def _parse_canonical_datetime( + doc: Any, json_options: JSONOptions +) -> Union[datetime.datetime, 
DatetimeMS]: """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: - raise TypeError('Bad $date, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $date, extra field(s): {doc}") # mongoexport 2.6 and newer - if isinstance(dtm, string_type): + if isinstance(dtm, str): # Parse offset - if dtm[-1] == 'Z': + if dtm[-1] == "Z": dt = dtm[:-1] - offset = 'Z' - elif dtm[-6] in ('+', '-') and dtm[-3] == ':': + offset = "Z" + elif dtm[-6] in ("+", "-") and dtm[-3] == ":": # (+|-)HH:MM dt = dtm[:-6] offset = dtm[-6:] - elif dtm[-5] in ('+', '-'): + elif dtm[-5] in ("+", "-"): # (+|-)HHMM dt = dtm[:-5] offset = dtm[-5:] - elif dtm[-3] in ('+', '-'): + elif dtm[-3] in ("+", "-"): # (+|-)HH dt = dtm[:-3] offset = dtm[-3:] else: dt = dtm - offset = '' + offset = "" # Parse the optional fractional seconds portion. - dot_index = dt.rfind('.') + dot_index = dt.rfind(".") microsecond = 0 if dot_index != -1: microsecond = int(float(dt[dot_index:]) * 1000000) dt = dt[:dot_index] - aware = datetime.datetime.strptime( - dt, "%Y-%m-%dT%H:%M:%S").replace(microsecond=microsecond, - tzinfo=utc) + aware = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S").replace( + microsecond=microsecond, tzinfo=utc + ) - if offset and offset != 'Z': + if offset and offset != "Z": if len(offset) == 6: - hours, minutes = offset[1:].split(':') - secs = (int(hours) * 3600 + int(minutes) * 60) + hours, minutes = offset[1:].split(":") + secs = int(hours) * 3600 + int(minutes) * 60 elif len(offset) == 5: - secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60) + secs = int(offset[1:3]) * 3600 + int(offset[3:]) * 60 elif len(offset) == 3: secs = int(offset[1:3]) * 3600 if offset[0] == "-": @@ -587,145 +696,146 @@ def _parse_canonical_datetime(doc, json_options): if json_options.tz_aware: if json_options.tzinfo: aware = aware.astimezone(json_options.tzinfo) + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: + return DatetimeMS(aware) return aware else: - return aware.replace(tzinfo=None) - return bson._millis_to_datetime(int(dtm), json_options) + aware_tzinfo_none = aware.replace(tzinfo=None) + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: + return DatetimeMS(aware_tzinfo_none) + return aware_tzinfo_none + return _millis_to_datetime(int(dtm), cast("CodecOptions[Any]", json_options)) -def _parse_canonical_oid(doc): +def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: - raise TypeError('Bad $oid, extra field(s): %s' % (doc,)) - return ObjectId(doc['$oid']) + raise TypeError(f"Bad $oid, extra field(s): {doc}") + return ObjectId(doc["$oid"]) -def _parse_canonical_symbol(doc): +def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" - symbol = doc['$symbol'] + symbol = doc["$symbol"] if len(doc) != 1: - raise TypeError('Bad $symbol, extra field(s): %s' % (doc,)) - return text_type(symbol) + raise TypeError(f"Bad $symbol, extra field(s): {doc}") + return str(symbol) -def _parse_canonical_code(doc): +def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: - if key not in ('$code', '$scope'): - raise TypeError('Bad $code, extra field(s): %s' % (doc,)) - return Code(doc['$code'], scope=doc.get('$scope')) + if key not in ("$code", "$scope"): + raise TypeError(f"Bad $code, extra field(s): {doc}") + return Code(doc["$code"], scope=doc.get("$scope")) -def _parse_canonical_regex(doc): +def
_parse_canonical_regex(doc: Any) -> Regex[str]: """Decode a JSON regex to bson.regex.Regex.""" - regex = doc['$regularExpression'] + regex = doc["$regularExpression"] if len(doc) != 1: - raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $regularExpression, extra field(s): {doc}") if len(regex) != 2: - raise TypeError('Bad $regularExpression must include only "pattern"' - 'and "options" components: %s' % (doc,)) - return Regex(regex['pattern'], regex['options']) - - -def _parse_canonical_dbref(doc): + raise TypeError( + f'Bad $regularExpression must include only "pattern" and "options" components: {doc}' + ) + opts = regex["options"] + if not isinstance(opts, str): + raise TypeError( + "Bad $regularExpression options, options must be string, was type %s" % (type(opts)) + ) + return Regex(regex["pattern"], opts) + + +def _parse_canonical_dbref(doc: Any) -> DBRef: """Decode a JSON DBRef to bson.dbref.DBRef.""" - for key in doc: - if key.startswith('$') and key not in _DBREF_KEYS: - # Other keys start with $, so dct cannot be parsed as a DBRef. - return doc - return DBRef(doc.pop('$ref'), doc.pop('$id'), - database=doc.pop('$db', None), **doc) + return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc) -def _parse_canonical_dbpointer(doc): +def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" - dbref = doc['$dbPointer'] + dbref = doc["$dbPointer"] if len(doc) != 1: - raise TypeError('Bad $dbPointer, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s): {doc}") if isinstance(dbref, DBRef): dbref_doc = dbref.as_doc() # DBPointer must not contain $db in its value. if dbref.database is not None: - raise TypeError( - 'Bad $dbPointer, extra field $db: %s' % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field $db: {dbref_doc}") if not isinstance(dbref.id, ObjectId): - raise TypeError( - 'Bad $dbPointer, $id must be an ObjectId: %s' % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, $id must be an ObjectId: {dbref_doc}") if len(dbref_doc) != 2: - raise TypeError( - 'Bad $dbPointer, extra field(s) in DBRef: %s' % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s) in DBRef: {dbref_doc}") return dbref else: - raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,)) + raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}") -def _parse_canonical_int32(doc): +def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" - i_str = doc['$numberInt'] + i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,)) - if not isinstance(i_str, string_type): - raise TypeError('$numberInt must be string: %s' % (doc,)) + raise TypeError(f"Bad $numberInt, extra field(s): {doc}") + if not isinstance(i_str, str): + raise TypeError(f"$numberInt must be string: {doc}") return int(i_str) -def _parse_canonical_int64(doc): +def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" - l_str = doc['$numberLong'] + l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError('Bad $numberLong, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $numberLong, extra field(s): {doc}") return Int64(l_str) -def _parse_canonical_double(doc): +def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" - d_str = doc['$numberDouble'] + d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError('Bad
$numberDouble, extra field(s): %s' % (doc,)) - if not isinstance(d_str, string_type): - raise TypeError('$numberDouble must be string: %s' % (doc,)) + raise TypeError(f"Bad $numberDouble, extra field(s): {doc}") + if not isinstance(d_str, str): + raise TypeError(f"$numberDouble must be string: {doc}") return float(d_str) -def _parse_canonical_decimal128(doc): +def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" - d_str = doc['$numberDecimal'] + d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,)) - if not isinstance(d_str, string_type): - raise TypeError('$numberDecimal must be string: %s' % (doc,)) + raise TypeError(f"Bad $numberDecimal, extra field(s): {doc}") + if not isinstance(d_str, str): + raise TypeError(f"$numberDecimal must be string: {doc}") return Decimal128(d_str) -def _parse_canonical_minkey(doc): +def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" - if type(doc['$minKey']) is not int or doc['$minKey'] != 1: - raise TypeError('$minKey value must be 1: %s' % (doc,)) + if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: # noqa: E721 + raise TypeError(f"$minKey value must be 1: {doc}") if len(doc) != 1: - raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MinKey() -def _parse_canonical_maxkey(doc): +def _parse_canonical_maxkey(doc: Any) -> MaxKey: """Decode a JSON MaxKey to bson.max_key.MaxKey.""" - if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1: - raise TypeError('$maxKey value must be 1: %s', (doc,)) + if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: # noqa: E721 + raise TypeError(f"$maxKey value must be 1: {doc}") if len(doc) != 1: - raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) + raise TypeError(f"Bad $maxKey, extra field(s): {doc}") return MaxKey() -def _encode_binary(data, subtype, json_options): +def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: if json_options.json_mode == JSONMode.LEGACY: - return SON([ - ('$binary', base64.b64encode(data).decode()), - ('$type', "%02x" % subtype)]) - return {'$binary': SON([ - ('base64', base64.b64encode(data).decode()), - ('subType', "%02x" % subtype)])} + return SON([("$binary", base64.b64encode(data).decode()), ("$type", "%02x" % subtype)]) + return { + "$binary": SON([("base64", base64.b64encode(data).decode()), ("subType", "%02x" % subtype)]) + } -def default(obj, json_options=DEFAULT_JSON_OPTIONS): +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: # We preserve key order when rendering SON, DBRef, etc. as JSON by # returning a SON for those types instead of a dict.
if isinstance(obj, ObjectId): @@ -733,26 +843,35 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): if isinstance(obj, DBRef): return _json_convert(obj.as_doc(), json_options=json_options) if isinstance(obj, datetime.datetime): - if (json_options.datetime_representation == - DatetimeRepresentation.ISO8601): + if json_options.datetime_representation == DatetimeRepresentation.ISO8601: if not obj.tzinfo: obj = obj.replace(tzinfo=utc) + assert obj.tzinfo is not None if obj >= EPOCH_AWARE: off = obj.tzinfo.utcoffset(obj) - if (off.days, off.seconds, off.microseconds) == (0, 0, 0): - tz_string = 'Z' + if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore + tz_string = "Z" else: - tz_string = obj.strftime('%z') + tz_string = obj.strftime("%z") millis = int(obj.microsecond / 1000) fracsecs = ".%03d" % (millis,) if millis else "" - return {"$date": "%s%s%s" % ( - obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)} + return { + "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + } - millis = bson._datetime_to_millis(obj) - if (json_options.datetime_representation == - DatetimeRepresentation.LEGACY): + millis = _datetime_to_millis(obj) + if json_options.datetime_representation == DatetimeRepresentation.LEGACY: return {"$date": millis} return {"$date": {"$numberLong": str(millis)}} + if isinstance(obj, DatetimeMS): + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _max_datetime_ms() + ): + return default(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": str(int(obj))} + return {"$date": {"$numberLong": str(int(obj))}} if json_options.strict_number_long and isinstance(obj, Int64): return {"$numberLong": str(obj)} if isinstance(obj, (RE_TYPE, Regex)): @@ -769,14 +888,13 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): flags += "u" if obj.flags & re.VERBOSE: flags += "x" - if isinstance(obj.pattern, text_type): + if isinstance(obj.pattern, str): pattern = obj.pattern else: - pattern = obj.pattern.decode('utf-8') + pattern = obj.pattern.decode("utf-8") if json_options.json_mode == JSONMode.LEGACY: return SON([("$regex", pattern), ("$options", flags)]) - return {'$regularExpression': SON([("pattern", pattern), - ("options", flags)])} + return {"$regularExpression": SON([("pattern", pattern), ("options", flags)])} if isinstance(obj, MinKey): return {"$minKey": 1} if isinstance(obj, MaxKey): @@ -785,45 +903,34 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} if isinstance(obj, Code): if obj.scope is None: - return {'$code': str(obj)} - return SON([ - ('$code', str(obj)), - ('$scope', _json_convert(obj.scope, json_options))]) + return {"$code": str(obj)} + return SON([("$code", str(obj)), ("$scope", _json_convert(obj.scope, json_options))]) if isinstance(obj, Binary): return _encode_binary(obj, obj.subtype, json_options) - if PY3 and isinstance(obj, bytes): + if isinstance(obj, bytes): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: - data = obj.bytes - subtype = OLD_UUID_SUBTYPE - if json_options.uuid_representation == CSHARP_LEGACY: - data = obj.bytes_le - elif json_options.uuid_representation == JAVA_LEGACY: - data = data[7::-1] + data[:7:-1] - elif json_options.uuid_representation == UUID_SUBTYPE: - subtype = UUID_SUBTYPE - return _encode_binary(data, subtype, json_options) + binval = 
Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) + return _encode_binary(binval, binval.subtype, json_options) else: return {"$uuid": obj.hex} if isinstance(obj, Decimal128): return {"$numberDecimal": str(obj)} if isinstance(obj, bool): return obj - if (json_options.json_mode == JSONMode.CANONICAL and - isinstance(obj, integer_types)): - if -2 ** 31 <= obj < 2 ** 31: - return {'$numberInt': text_type(obj)} - return {'$numberLong': text_type(obj)} + if json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int): + if -(2**31) <= obj < 2**31: + return {"$numberInt": str(obj)} + return {"$numberLong": str(obj)} if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): if math.isnan(obj): - return {'$numberDouble': 'NaN'} + return {"$numberDouble": "NaN"} elif math.isinf(obj): - representation = 'Infinity' if obj > 0 else '-Infinity' - return {'$numberDouble': representation} + representation = "Infinity" if obj > 0 else "-Infinity" + return {"$numberDouble": representation} elif json_options.json_mode == JSONMode.CANONICAL: # repr() will return the shortest string guaranteed to produce the - # original value, when float() is called on it. str produces a - # shorter string in Python 2. - return {'$numberDouble': text_type(repr(obj))} + # original value, when float() is called on it. + return {"$numberDouble": str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj) diff --git a/bson/max_key.py b/bson/max_key.py index 7e89dd70de..445e12f519 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -12,39 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MaxKey type. -""" +"""Representation for the MongoDB internal MaxKey type.""" +from __future__ import annotations +from typing import Any -class MaxKey(object): - """MongoDB internal MaxKey type. - .. versionchanged:: 2.7 - ``MaxKey`` now implements comparison operators. - """ +class MaxKey: + """MongoDB internal MaxKey type.""" + + __slots__ = () _type_marker = 127 - def __eq__(self, other): + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass + + def __eq__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, other): + def __le__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __lt__(self, dummy): + def __lt__(self, dummy: Any) -> bool: return False - def __ge__(self, dummy): + def __ge__(self, dummy: Any) -> bool: return True - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: return not isinstance(other, MaxKey) - def __repr__(self): + def __repr__(self) -> str: return "MaxKey()" diff --git a/bson/min_key.py b/bson/min_key.py index b03520e9c2..37828dcf74 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -12,39 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MinKey type. -""" +"""Representation for the MongoDB internal MinKey type.""" +from __future__ import annotations +from typing import Any -class MinKey(object): - """MongoDB internal MinKey type. - .. versionchanged:: 2.7 - ``MinKey`` now implements comparison operators. 
- """ +class MinKey: + """MongoDB internal MinKey type.""" + + __slots__ = () _type_marker = 255 - def __eq__(self, other): + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass + + def __eq__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, dummy): + def __le__(self, dummy: Any) -> bool: return True - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: return not isinstance(other, MinKey) - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __gt__(self, dummy): + def __gt__(self, dummy: Any) -> bool: return False - def __repr__(self): + def __repr__(self) -> str: return "MinKey()" diff --git a/bson/objectid.py b/bson/objectid.py index c6fa652f3b..2a3d9ebf5b 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB `ObjectIds -`_. -""" +"""Tools for working with MongoDB ObjectIds.""" +from __future__ import annotations import binascii import calendar @@ -23,31 +22,29 @@ import struct import threading import time - from random import SystemRandom +from typing import Any, NoReturn, Optional, Type, Union from bson.errors import InvalidId -from bson.py3compat import PY3, bytes_from_hex, string_type, text_type from bson.tz_util import utc - _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid): +def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" - " or a 24-character hex string" % oid) + " or a 24-character hex string" % oid + ) -def _random_bytes(): +def _random_bytes() -> bytes: """Get the 5-byte random field of an ObjectId.""" return os.urandom(5) -class ObjectId(object): - """A MongoDB ObjectId. - """ +class ObjectId: + """A MongoDB ObjectId.""" _pid = os.getpid() @@ -56,11 +53,11 @@ class ObjectId(object): __random = _random_bytes() - __slots__ = ('__id',) + __slots__ = ("__id",) _type_marker = 7 - def __init__(self, oid=None): + def __init__(self, oid: Optional[Union[str, ObjectId, bytes]] = None) -> None: """Initialize a new ObjectId. An ObjectId is a 12-byte unique identifier consisting of: @@ -71,7 +68,7 @@ def __init__(self, oid=None): By default, ``ObjectId()`` creates a new unique identifier. The optional parameter `oid` can be an :class:`ObjectId`, or any 12 - :class:`bytes` or, in Python 2, any 12-character :class:`str`. + :class:`bytes`. For example, the 12 bytes b'foo-bar-quux' do not follow the ObjectId specification but they are acceptable input:: @@ -79,14 +76,10 @@ def __init__(self, oid=None): >>> ObjectId(b'foo-bar-quux') ObjectId('666f6f2d6261722d71757578') - `oid` can also be a :class:`unicode` or :class:`str` of 24 hex digits:: + `oid` can also be a :class:`str` of 24 hex digits:: >>> ObjectId('0123456789ab0123456789ab') ObjectId('0123456789ab0123456789ab') - >>> - >>> # A u-prefixed unicode literal: - >>> ObjectId(u'0123456789ab0123456789ab') - ObjectId('0123456789ab0123456789ab') Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor 24 hex digits, or :class:`TypeError` if `oid` is not an accepted type. @@ -94,7 +87,7 @@ def __init__(self, oid=None): :Parameters: - `oid` (optional): a valid ObjectId. - .. 
mongodoc:: objectids + .. seealso:: The MongoDB documentation on `ObjectIds `_. .. versionchanged:: 3.8 :class:`~bson.objectid.ObjectId` now implements the `ObjectID @@ -110,7 +103,7 @@ def __init__(self, oid=None): self.__validate(oid) @classmethod - def from_datetime(cls, generation_time): + def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> ObjectId: """Create a dummy ObjectId instance with a specific generation time. This method is useful for doing range queries on a field @@ -137,15 +130,15 @@ def from_datetime(cls, generation_time): - `generation_time`: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - if generation_time.utcoffset() is not None: - generation_time = generation_time - generation_time.utcoffset() + offset = generation_time.utcoffset() + if offset is not None: + generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) - oid = struct.pack( - ">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = struct.pack(">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod - def is_valid(cls, oid): + def is_valid(cls: Type[ObjectId], oid: Any) -> bool: """Checks if a `oid` string is valid or not. :Parameters: @@ -163,19 +156,16 @@ def is_valid(cls, oid): return False @classmethod - def _random(cls): - """Generate a 5-byte random number once per process. - """ + def _random(cls) -> bytes: + """Generate a 5-byte random number once per process.""" pid = os.getpid() if pid != cls._pid: cls._pid = pid cls.__random = _random_bytes() return cls.__random - def __generate(self): - """Generate a new value for this ObjectId. - """ - + def __generate(self) -> None: + """Generate a new value for this ObjectId.""" # 4 bytes current time oid = struct.pack(">I", int(time.time())) @@ -189,12 +179,11 @@ def __generate(self): self.__id = oid - def __validate(self, oid): + def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. - Raises TypeError if id is not an instance of - (:class:`basestring` (:class:`str` or :class:`bytes` - in python 3), ObjectId) and InvalidId if it is not a + Raises TypeError if id is not an instance of :class:`str`, + :class:`bytes`, or ObjectId. Raises InvalidId if it is not a valid ObjectId. :Parameters: @@ -202,27 +191,24 @@ def __validate(self, oid): """ if isinstance(oid, ObjectId): self.__id = oid.binary - # bytes or unicode in python 2, str in python 3 - elif isinstance(oid, string_type): + elif isinstance(oid, str): if len(oid) == 24: try: - self.__id = bytes_from_hex(oid) + self.__id = bytes.fromhex(oid) except (TypeError, ValueError): _raise_invalid_id(oid) else: _raise_invalid_id(oid) else: - raise TypeError("id must be an instance of (bytes, %s, ObjectId), " - "not %s" % (text_type.__name__, type(oid))) + raise TypeError(f"id must be an instance of (bytes, str, ObjectId), not {type(oid)}") @property - def binary(self): - """12-byte binary representation of this ObjectId. - """ + def binary(self) -> bytes: + """12-byte binary representation of this ObjectId.""" return self.__id @property - def generation_time(self): + def generation_time(self) -> datetime.datetime: """A :class:`datetime.datetime` instance representing the time of generation for this :class:`ObjectId`. 
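A minimal sketch of the ObjectId API annotated above (the hex string and the date are arbitrary, illustrative values):

>>> from datetime import datetime, timezone
>>> from bson.objectid import ObjectId
>>> ObjectId.is_valid("0123456789ab0123456789ab")
True
>>> oid = ObjectId.from_datetime(datetime(2022, 1, 1, tzinfo=timezone.utc))
>>> oid.generation_time == datetime(2022, 1, 1, tzinfo=timezone.utc)
True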
@@ -233,16 +219,15 @@ def generation_time(self): timestamp = struct.unpack(">I", self.__id[0:4])[0] return datetime.datetime.fromtimestamp(timestamp, utc) - def __getstate__(self): - """return value of object for pickling. + def __getstate__(self) -> bytes: + """Return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id - def __setstate__(self, value): - """explicit state set from pickling - """ - # Provide backwards compatability with OIDs + def __setstate__(self, value: Any) -> None: + """Explicit state set from pickling""" + # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): oid = value["_ObjectId__id"] @@ -251,49 +236,47 @@ def __setstate__(self, value): # ObjectIds pickled in python 2.x used `str` for __id. # In python 3.x this has to be converted to `bytes` # by encoding latin-1. - if PY3 and isinstance(oid, text_type): - self.__id = oid.encode('latin-1') + if isinstance(oid, str): + self.__id = oid.encode("latin-1") else: self.__id = oid - def __str__(self): - if PY3: - return binascii.hexlify(self.__id).decode() - return binascii.hexlify(self.__id) + def __str__(self) -> str: + return binascii.hexlify(self.__id).decode() - def __repr__(self): - return "ObjectId('%s')" % (str(self),) + def __repr__(self) -> str: + return f"ObjectId('{self!s}')" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id == other.binary return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id != other.binary return NotImplemented - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id < other.binary return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id <= other.binary return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id > other.binary return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id >= other.binary return NotImplemented - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`ObjectId`.""" return hash(self.__id) diff --git a/bson/py.typed b/bson/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/bson/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/bson/py3compat.py b/bson/py3compat.py deleted file mode 100644 index 84d1ea00fd..0000000000 --- a/bson/py3compat.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. 
- -"""Utility functions and definitions for python3 compatibility.""" - -import sys - -PY3 = sys.version_info[0] == 3 - -if PY3: - import codecs - import collections.abc as abc - import _thread as thread - from abc import ABC, abstractmethod - from io import BytesIO as StringIO - - def abstractproperty(func): - return property(abstractmethod(func)) - - MAXSIZE = sys.maxsize - - imap = map - - def b(s): - # BSON and socket operations deal in binary data. In - # python 3 that means instances of `bytes`. In python - # 2.7 you can create an alias for `bytes` using - # the b prefix (e.g. b'foo'). - # See http://python3porting.com/problems.html#nicer-solutions - return codecs.latin_1_encode(s)[0] - - def bytes_from_hex(h): - return bytes.fromhex(h) - - def iteritems(d): - return iter(d.items()) - - def itervalues(d): - return iter(d.values()) - - def reraise(exctype, value, trace=None): - raise exctype(str(value)).with_traceback(trace) - - def reraise_instance(exc_instance, trace=None): - raise exc_instance.with_traceback(trace) - - def _unicode(s): - return s - - text_type = str - string_type = str - integer_types = int -else: - import collections as abc - import thread - from abc import ABCMeta, abstractproperty - - from itertools import imap - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - - ABC = ABCMeta('ABC', (object,), {}) - - MAXSIZE = sys.maxint - - def b(s): - # See comments above. In python 2.x b('foo') is just 'foo'. - return s - - def bytes_from_hex(h): - return h.decode('hex') - - def iteritems(d): - return d.iteritems() - - def itervalues(d): - return d.itervalues() - - def reraise(exctype, value, trace=None): - _reraise(exctype, str(value), trace) - - def reraise_instance(exc_instance, trace=None): - _reraise(exc_instance, None, trace) - - # "raise x, y, z" raises SyntaxError in Python 3 - exec("""def _reraise(exc, value, trace): - raise exc, value, trace -""") - - _unicode = unicode - - string_type = basestring - text_type = unicode - integer_types = (int, long) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 6a7cf5045d..50362398a3 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -13,16 +13,71 @@ # limitations under the License. """Tools for representing raw BSON documents. + +Inserting and Retrieving RawBSONDocuments +========================================= + +Example: Moving a document between different databases/collections + +.. doctest:: + + >>> import bson + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> client = MongoClient(document_class=RawBSONDocument) + >>> client.drop_database("db") + >>> client.drop_database("replica_db") + >>> db = client.db + >>> result = db.test.insert_many( + ... [{"_id": 1, "a": 1}, {"_id": 2, "b": 1}, {"_id": 3, "c": 1}, {"_id": 4, "d": 1}] + ... ) + >>> replica_db = client.replica_db + >>> for doc in db.test.find(): + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + ... + raw document: b'...' + decoded document: {'_id': 1, 'a': 1} + raw document: b'...' + decoded document: {'_id': 2, 'b': 1} + raw document: b'...' + decoded document: {'_id': 3, 'c': 1} + raw document: b'...' + decoded document: {'_id': 4, 'd': 1} + +For use cases like moving documents across different databases or writing binary +blobs to disk, using raw BSON documents provides better speed and avoids the +overhead of decoding or encoding BSON. 
""" +from __future__ import annotations + +from typing import Any, ItemsView, Iterator, Mapping, MutableMapping, Optional -from bson import _raw_to_dict, _get_object_size -from bson.py3compat import abc, iteritems -from bson.codec_options import ( - DEFAULT_CODEC_OPTIONS as DEFAULT, _RAW_BSON_DOCUMENT_MARKER) +from bson import _get_object_size, _raw_to_dict +from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER, CodecOptions +from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT from bson.son import SON -class RawBSONDocument(abc.Mapping): +def _inflate_bson( + bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument], raw_array: bool = False +) -> MutableMapping[str, Any]: + """Inflates the top level fields of a BSON document. + + :Parameters: + - `bson_bytes`: the BSON bytes that compose this document + - `codec_options`: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. + """ + # Use SON to preserve ordering of elements. + return _raw_to_dict( + bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON(), raw_array=raw_array + ) + + +class RawBSONDocument(Mapping[str, Any]): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. @@ -30,10 +85,13 @@ class RawBSONDocument(abc.Mapping): RawBSONDocument decode its bytes. """ - __slots__ = ('__raw', '__inflated_doc', '__codec_options') + __slots__ = ("__raw", "__inflated_doc", "__codec_options") _type_marker = _RAW_BSON_DOCUMENT_MARKER + __codec_options: CodecOptions[RawBSONDocument] - def __init__(self, bson_bytes, codec_options=None): + def __init__( + self, bson_bytes: bytes, codec_options: Optional[CodecOptions[RawBSONDocument]] = None + ) -> None: """Create a new :class:`RawBSONDocument` :class:`RawBSONDocument` is a representation of a BSON document that @@ -68,72 +126,78 @@ class from the standard library so it can be used like a read-only `document_class` must be :class:`RawBSONDocument`. """ self.__raw = bson_bytes - self.__inflated_doc = None + self.__inflated_doc: Optional[Mapping[str, Any]] = None # Can't default codec_options to DEFAULT_RAW_BSON_OPTIONS in signature, # it refers to this class RawBSONDocument. if codec_options is None: codec_options = DEFAULT_RAW_BSON_OPTIONS - elif codec_options.document_class is not RawBSONDocument: + elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class, )) + f"class {codec_options.document_class}" + ) self.__codec_options = codec_options # Validate the bson object size. _get_object_size(bson_bytes, 0, len(bson_bytes)) @property - def raw(self): + def raw(self) -> bytes: """The raw BSON bytes composing this document.""" return self.__raw - def items(self): + def items(self) -> ItemsView[str, Any]: """Lazily decode and iterate elements in this document.""" - return iteritems(self.__inflated) + return self.__inflated.items() @property - def __inflated(self): + def __inflated(self) -> Mapping[str, Any]: if self.__inflated_doc is None: # We already validated the object's size when this document was # created, so no need to do that again. # Use SON to preserve ordering of elements. 
- self.__inflated_doc = _inflate_bson( - self.__raw, self.__codec_options) + self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc - def __getitem__(self, item): + @staticmethod + def _inflate_bson( + bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options) + + def __getitem__(self, item: str) -> Any: return self.__inflated[item] - def __iter__(self): + def __iter__(self) -> Iterator[str]: return iter(self.__inflated) - def __len__(self): + def __len__(self) -> int: return len(self.__inflated) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, RawBSONDocument): return self.__raw == other.raw return NotImplemented - def __repr__(self): - return ("RawBSONDocument(%r, codec_options=%r)" - % (self.raw, self.__codec_options)) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.raw!r}, codec_options={self.__codec_options!r})" -def _inflate_bson(bson_bytes, codec_options): - """Inflates the top level fields of a BSON document. +class _RawArrayBSONDocument(RawBSONDocument): + """A RawBSONDocument that only expands sub-documents and arrays when accessed.""" - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options`: An instance of - :class:`~bson.codec_options.CodecOptions` whose ``document_class`` - must be :class:`RawBSONDocument`. - """ - # Use SON to preserve ordering of elements. - return _raw_to_dict( - bson_bytes, 4, len(bson_bytes)-1, codec_options, SON()) + @staticmethod + def _inflate_bson( + bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options, raw_array=True) -DEFAULT_RAW_BSON_OPTIONS = DEFAULT.with_options(document_class=RawBSONDocument) +DEFAULT_RAW_BSON_OPTIONS: CodecOptions[RawBSONDocument] = DEFAULT.with_options( + document_class=RawBSONDocument +) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. """ +_RAW_ARRAY_BSON_OPTIONS: CodecOptions[_RawArrayBSONDocument] = DEFAULT.with_options( + document_class=_RawArrayBSONDocument +) diff --git a/bson/regex.py b/bson/regex.py index f9d39ad83d..e3ca1ab69f 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB regular expressions. -""" +"""Tools for representing MongoDB regular expressions.""" +from __future__ import annotations import re +from typing import Any, Generic, Pattern, Type, TypeVar, Union +from bson._helpers import _getstate_slots, _setstate_slots from bson.son import RE_TYPE -from bson.py3compat import string_type, text_type -def str_flags_to_int(str_flags): +def str_flags_to_int(str_flags: str) -> int: flags = 0 if "i" in str_flags: flags |= re.IGNORECASE @@ -39,12 +40,21 @@ def str_flags_to_int(str_flags): return flags -class Regex(object): +_T = TypeVar("_T", str, bytes) + + +class Regex(Generic[_T]): """BSON regular expression data.""" + + __slots__ = ("pattern", "flags") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots + _type_marker = 11 @classmethod - def from_native(cls, regex): + def from_native(cls: Type[Regex[Any]], regex: Pattern[_T]) -> Regex[_T]: """Convert a Python regular expression into a ``Regex`` instance.
Note that in Python 3, a regular expression compiled from a @@ -54,7 +64,7 @@ def from_native(cls, regex): >>> pattern = re.compile('.*') >>> regex = Regex.from_native(pattern) >>> regex.flags ^= re.UNICODE - >>> db.collection.insert({'pattern': regex}) + >>> db.collection.insert_one({'pattern': regex}) :Parameters: - `regex`: A regular expression object from ``re.compile()``. @@ -69,13 +79,11 @@ def from_native(cls, regex): .. _PCRE: http://www.pcre.org/ """ if not isinstance(regex, RE_TYPE): - raise TypeError( - "regex must be a compiled regular expression, not %s" - % type(regex)) + raise TypeError("regex must be a compiled regular expression, not %s" % type(regex)) return Regex(regex.pattern, regex.flags) - def __init__(self, pattern, flags=0): + def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are @@ -86,33 +94,32 @@ def __init__(self, pattern, flags=0): - `flags`: (optional) an integer bitmask, or a string of flag characters like "im" for IGNORECASE and MULTILINE """ - if not isinstance(pattern, (text_type, bytes)): + if not isinstance(pattern, (str, bytes)): raise TypeError("pattern must be a string, not %s" % type(pattern)) - self.pattern = pattern + self.pattern: _T = pattern - if isinstance(flags, string_type): + if isinstance(flags, str): self.flags = str_flags_to_int(flags) elif isinstance(flags, int): self.flags = flags else: - raise TypeError( - "flags must be a string or int, not %s" % type(flags)) + raise TypeError("flags must be a string or int, not %s" % type(flags)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): return self.pattern == other.pattern and self.flags == other.flags else: return NotImplemented - __hash__ = None + __hash__ = None # type: ignore - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "Regex(%r, %r)" % (self.pattern, self.flags) + def __repr__(self) -> str: + return f"Regex({self.pattern!r}, {self.flags!r})" - def try_compile(self): + def try_compile(self) -> Pattern[_T]: """Compile this :class:`Regex` as a Python regular expression. .. warning:: diff --git a/bson/son.py b/bson/son.py index 701cb23186..c5df4e5972 100644 --- a/bson/son.py +++ b/bson/son.py @@ -16,105 +16,116 @@ Regular dictionaries can be used instead of SON objects, but not when the order of keys is important. A SON object can be used just like a normal Python -dictionary.""" +dictionary. +""" +from __future__ import annotations import copy import re - -from bson.py3compat import abc, iteritems - +from collections.abc import Mapping as _Mapping +from typing import ( + Any, + Dict, + Iterable, + Iterator, + Mapping, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + cast, +) # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type -RE_TYPE = type(re.compile("")) +RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) +_Key = TypeVar("_Key") +_Value = TypeVar("_Value") +_T = TypeVar("_T") -class SON(dict): + +class SON(Dict[_Key, _Value]): """SON data. A subclass of dict that maintains ordering of keys and provides a few extra niceties for dealing with SON. SON provides an API - similar to collections.OrderedDict from Python 2.7+. + similar to collections.OrderedDict. 
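+
+    For example, a minimal sketch of the ordering behavior::
+
+      >>> from bson.son import SON
+      >>> s = SON([("b", 2), ("a", 1)])
+      >>> list(s)
+      ['b', 'a']
+      >>> s.to_dict()
+      {'b': 2, 'a': 1}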
""" - def __init__(self, data=None, **kwargs): + __keys: list[Any] + + def __init__( + self, + data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, + **kwargs: Any, + ) -> None: self.__keys = [] dict.__init__(self) self.update(data) self.update(kwargs) - def __new__(cls, *args, **kwargs): - instance = super(SON, cls).__new__(cls, *args, **kwargs) + def __new__(cls: Type[SON[_Key, _Value]], *args: Any, **kwargs: Any) -> SON[_Key, _Value]: + instance = super().__new__(cls, *args, **kwargs) # type: ignore[type-var] instance.__keys = [] return instance - def __repr__(self): + def __repr__(self) -> str: result = [] for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) + result.append(f"({key!r}, {self[key]!r})") return "SON([%s])" % ", ".join(result) - def __setitem__(self, key, value): + def __setitem__(self, key: _Key, value: _Value) -> None: if key not in self.__keys: self.__keys.append(key) dict.__setitem__(self, key, value) - def __delitem__(self, key): + def __delitem__(self, key: _Key) -> None: self.__keys.remove(key) dict.__delitem__(self, key) - def keys(self): - return list(self.__keys) - - def copy(self): - other = SON() + def copy(self) -> SON[_Key, _Value]: + other: SON[_Key, _Value] = SON() other.update(self) return other # TODO this is all from UserDict.DictMixin. it could probably be made more # efficient. # second level definitions support higher levels - def __iter__(self): - for k in self.__keys: - yield k + def __iter__(self) -> Iterator[_Key]: + yield from self.__keys - def has_key(self, key): + def has_key(self, key: _Key) -> bool: return key in self.__keys - # third level takes advantage of second level definitions - def iteritems(self): - for k in self: - yield (k, self[k]) - - def iterkeys(self): + def iterkeys(self) -> Iterator[_Key]: return self.__iter__() # fourth level uses definitions from lower levels - def itervalues(self): - for _, v in self.iteritems(): + def itervalues(self) -> Iterator[_Value]: + for _, v in self.items(): yield v - def values(self): - return [v for _, v in self.iteritems()] - - def items(self): - return [(key, self[key]) for key in self] + def values(self) -> list[_Value]: # type: ignore[override] + return [v for _, v in self.items()] - def clear(self): + def clear(self) -> None: self.__keys = [] - super(SON, self).clear() + super().clear() - def setdefault(self, key, default=None): + def setdefault(self, key: _Key, default: _Value) -> _Value: try: return self[key] except KeyError: self[key] = default return default - def pop(self, key, *args): + def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: - raise TypeError("pop expected at most 2 arguments, got "\ - + repr(1 + len(args))) + raise TypeError("pop expected at most 2 arguments, got " + repr(1 + len(args))) try: value = self[key] except KeyError: @@ -124,23 +135,23 @@ def pop(self, key, *args): del self[key] return value - def popitem(self): + def popitem(self) -> Tuple[_Key, _Value]: try: - k, v = next(self.iteritems()) + k, v = next(iter(self.items())) except StopIteration: - raise KeyError('container is empty') + raise KeyError("container is empty") from None del self[k] return (k, v) - def update(self, other=None, **kwargs): + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type: ignore[override] # Make progressively weaker assumptions about "other" if other is None: pass - elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups - for k, v in 
other.iteritems(): + elif hasattr(other, "items"): + for k, v in other.items(): self[k] = v - elif hasattr(other, 'keys'): - for k in other.keys(): + elif hasattr(other, "keys"): + for k in other: self[k] = other[k] else: for k, v in other: @@ -148,53 +159,51 @@ def update(self, other=None, **kwargs): if kwargs: self.update(kwargs) - def get(self, key, default=None): + def get(self, key: _Key, default: Optional[Union[_Value, _T]] = None) -> Union[_Value, _T, None]: # type: ignore[override] try: return self[key] except KeyError: return default - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: """Comparison to another SON is order-sensitive while comparison to a regular dictionary is order-insensitive. """ if isinstance(other, SON): - return len(self) == len(other) and self.items() == other.items() - return self.to_dict() == other + return len(self) == len(other) and list(self.items()) == list(other.items()) + return cast(bool, self.to_dict() == other) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __len__(self): + def __len__(self) -> int: return len(self.__keys) - def to_dict(self): + def to_dict(self) -> dict[_Key, _Value]: """Convert a SON document to a normal Python dictionary instance. This is trickier than just *dict(...)* because it needs to be recursive. """ - def transform_value(value): + def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] - elif isinstance(value, abc.Mapping): - return dict([ - (k, transform_value(v)) - for k, v in iteritems(value)]) + elif isinstance(value, _Mapping): + return {k: transform_value(v) for k, v in value.items()} else: return value - return transform_value(dict(self)) + return cast("dict[_Key, _Value]", transform_value(dict(self))) - def __deepcopy__(self, memo): - out = SON() + def __deepcopy__(self, memo: dict[int, SON[_Key, _Value]]) -> SON[_Key, _Value]: + out: SON[_Key, _Value] = SON() val_id = id(self) if val_id in memo: - return memo.get(val_id) + return memo[val_id] memo[val_id] = out - for k, v in self.iteritems(): + for k, v in self.items(): if not isinstance(v, RE_TYPE): - v = copy.deepcopy(v, memo) + v = copy.deepcopy(v, memo) # noqa: PLW2901 out[k] = v return out diff --git a/bson/time64.c b/bson/time64.c index bad6b51dc1..a21fbb90bd 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -29,13 +29,13 @@ THE SOFTWARE. /* Programmers who have available to them 64-bit time values as a 'long -long' type can use localtime64_r() and gmtime64_r() which correctly +long' type can use cbson_localtime64_r() and cbson_gmtime64_r() which correctly converts the time even on 32-bit systems. Whether you have 64-bit time values will depend on the operating system. -localtime64_r() is a 64-bit equivalent of localtime_r(). +cbson_localtime64_r() is a 64-bit equivalent of localtime_r(). -gmtime64_r() is a 64-bit equivalent of gmtime_r(). +cbson_gmtime64_r() is a 64-bit equivalent of gmtime_r(). */ @@ -73,7 +73,7 @@ static const Year years_in_gregorian_cycle = 400; #define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; -/* Year range we can trust the time funcitons with */ +/* Year range we can trust the time functions with */ #define MAX_SAFE_YEAR 2037 #define MIN_SAFE_YEAR 1971 @@ -158,7 +158,7 @@ static int is_exception_century(Year year) The result is like cmp. 
Ignores things like gmtoffset and dst */ -int cmp_date( const struct TM* left, const struct tm* right ) { +int cbson_cmp_date( const struct TM* left, const struct tm* right ) { if( left->tm_year > right->tm_year ) return 1; else if( left->tm_year < right->tm_year ) @@ -196,11 +196,11 @@ int cmp_date( const struct TM* left, const struct tm* right ) { /* Check if a date is safely inside a range. The intention is to check if its a few days inside. */ -int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cmp_date(date, min) == -1 ) +int cbson_date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { + if( cbson_cmp_date(date, min) == -1 ) return 0; - if( cmp_date(date, max) == 1 ) + if( cbson_cmp_date(date, max) == 1 ) return 0; return 1; @@ -209,9 +209,9 @@ int date_in_safe_range( const struct TM* date, const struct tm* min, const struc /* timegm() is not in the C or POSIX spec, but it is such a useful extension I would be remiss in leaving it out. Also I need it - for localtime64() + for cbson_localtime64() */ -Time64_T timegm64(const struct TM *date) { +Time64_T cbson_timegm64(const struct TM *date) { Time64_T days = 0; Time64_T seconds = 0; Year year; @@ -376,7 +376,7 @@ static int safe_year(const Year year) } -void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { +void pymongo_copy_tm_to_TM64(const struct tm *src, struct TM *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -408,7 +408,7 @@ void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { } -void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { +void cbson_copy_TM64_to_tm(const struct TM *src, struct tm *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -441,7 +441,7 @@ void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { /* Simulate localtime_r() to the best of our ability */ -struct tm * fake_localtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_localtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = localtime(time); assert(result != NULL); @@ -458,7 +458,7 @@ struct tm * fake_localtime_r(const time_t *time, struct tm *result) { /* Simulate gmtime_r() to the best of our ability */ -struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_gmtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = gmtime(time); assert(result != NULL); @@ -499,22 +499,22 @@ static Time64_T seconds_between_years(Year left_year, Year right_year) { } -Time64_T mktime64(const struct TM *input_date) { +Time64_T cbson_mktime64(const struct TM *input_date) { struct tm safe_date; struct TM date; Time64_T time; Year year = input_date->tm_year + 1900; - if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) + if( cbson_date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) { - copy_TM64_to_tm(input_date, &safe_date); + cbson_copy_TM64_to_tm(input_date, &safe_date); return (Time64_T)mktime(&safe_date); } /* Have to make the year safe in date else it won't fit in safe_date */ date = *input_date; date.tm_year = safe_year(year) - 1900; - copy_TM64_to_tm(&date, &safe_date); + cbson_copy_TM64_to_tm(&date, &safe_date); time = (Time64_T)mktime(&safe_date); @@ -526,11 +526,11 @@ Time64_T mktime64(const struct TM *input_date) { /* Because I think mktime() is a crappy name */ Time64_T timelocal64(const struct TM *date) { - return mktime64(date); + return cbson_mktime64(date); } 
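The cbson_/pymongo_ prefixes presumably exist to avoid symbol collisions with other extension modules that bundle the same time64 sources. The Python-visible effect of these 64-bit helpers is that datetimes outside the 32-bit time_t range still round-trip through BSON; a quick sanity sketch (works with the C extension or the pure-Python fallback):

>>> import datetime
>>> import bson
>>> bson.decode(bson.encode({"dt": datetime.datetime(2106, 2, 7)}))["dt"]
datetime.datetime(2106, 2, 7, 0, 0)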
-struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) +struct TM *cbson_gmtime64_r (const Time64_T *in_time, struct TM *p) { int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; Time64_T v_tm_tday; @@ -549,7 +549,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) struct tm safe_date; GMTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, p); + pymongo_copy_tm_to_TM64(&safe_date, p); assert(check_tm(p)); return p; @@ -659,7 +659,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) } -struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) +struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) { time_t safe_time; struct tm safe_date; @@ -678,15 +678,15 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) LOCALTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); assert(check_tm(local_tm)); return local_tm; } #endif - if( gmtime64_r(time, &gm_tm) == NULL ) { - TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); + if( cbson_gmtime64_r(time, &gm_tm) == NULL ) { + TIME64_TRACE1("cbson_gmtime64_r returned null for %lld\n", *time); return NULL; } @@ -700,13 +700,13 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; } - safe_time = (time_t)timegm64(&gm_tm); + safe_time = (time_t)cbson_timegm64(&gm_tm); if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); return NULL; } - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); local_tm->tm_year = (int)orig_year; if( local_tm->tm_year != orig_year ) { @@ -739,7 +739,7 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st in a non-leap xx00. There is one point in the cycle we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as + year. So we need to correct for Dec 31st coming out as the 366th day of the year. 
*/ if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) @@ -751,14 +751,14 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) } -int valid_tm_wday( const struct TM* date ) { +int cbson_valid_tm_wday( const struct TM* date ) { if( 0 <= date->tm_wday && date->tm_wday <= 6 ) return 1; else return 0; } -int valid_tm_mon( const struct TM* date ) { +int cbson_valid_tm_mon( const struct TM* date ) { if( 0 <= date->tm_mon && date->tm_mon <= 11 ) return 1; else @@ -767,15 +767,15 @@ int valid_tm_mon( const struct TM* date ) { /* Non-thread safe versions of the above */ -struct TM *localtime64(const Time64_T *time) { +struct TM *cbson_localtime64(const Time64_T *time) { #ifdef _MSC_VER _tzset(); #else tzset(); #endif - return localtime64_r(time, &Static_Return_Date); + return cbson_localtime64_r(time, &Static_Return_Date); } -struct TM *gmtime64(const Time64_T *time) { - return gmtime64_r(time, &Static_Return_Date); +struct TM *cbson_gmtime64(const Time64_T *time) { + return cbson_gmtime64_r(time, &Static_Return_Date); } diff --git a/bson/time64.h b/bson/time64.h index 61d9776926..6321eb307e 100644 --- a/bson/time64.h +++ b/bson/time64.h @@ -41,13 +41,13 @@ struct TM64 { /* Declare public functions */ -struct TM *gmtime64_r (const Time64_T *, struct TM *); -struct TM *localtime64_r (const Time64_T *, struct TM *); -struct TM *gmtime64 (const Time64_T *); -struct TM *localtime64 (const Time64_T *); +struct TM *cbson_gmtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_localtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_gmtime64 (const Time64_T *); +struct TM *cbson_localtime64 (const Time64_T *); -Time64_T timegm64 (const struct TM *); -Time64_T mktime64 (const struct TM *); +Time64_T cbson_timegm64 (const struct TM *); +Time64_T cbson_mktime64 (const struct TM *); Time64_T timelocal64 (const struct TM *); @@ -55,12 +55,12 @@ Time64_T timelocal64 (const struct TM *); #ifdef HAS_LOCALTIME_R # define LOCALTIME_R(clock, result) localtime_r(clock, result) #else -# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) +# define LOCALTIME_R(clock, result) cbson_fake_localtime_r(clock, result) #endif #ifdef HAS_GMTIME_R # define GMTIME_R(clock, result) gmtime_r(clock, result) #else -# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) +# define GMTIME_R(clock, result) cbson_fake_gmtime_r(clock, result) #endif diff --git a/bson/timestamp.py b/bson/timestamp.py index 7ea755117a..9bc6a715b6 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -12,25 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB internal Timestamps. -""" +"""Tools for representing MongoDB internal Timestamps.""" +from __future__ import annotations import calendar import datetime +from typing import Any, Union -from bson.py3compat import integer_types +from bson._helpers import _getstate_slots, _setstate_slots from bson.tz_util import utc UPPERBOUND = 4294967296 -class Timestamp(object): - """MongoDB internal timestamps used in the opLog. - """ +class Timestamp: + """MongoDB internal timestamps used in the opLog.""" + + __slots__ = ("__time", "__inc") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots _type_marker = 17 - def __init__(self, time, inc): + def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. 
If you need @@ -49,12 +54,13 @@ def __init__(self, time, inc): - `inc`: the incrementing counter """ if isinstance(time, datetime.datetime): - if time.utcoffset() is not None: - time = time - time.utcoffset() + offset = time.utcoffset() + if offset is not None: + time = time - offset time = int(calendar.timegm(time.timetuple())) - if not isinstance(time, integer_types): + if not isinstance(time, int): raise TypeError("time must be an instance of int") - if not isinstance(inc, integer_types): + if not isinstance(inc, int): raise TypeError("inc must be an instance of int") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") @@ -65,53 +71,51 @@ def __init__(self, time, inc): self.__inc = inc @property - def time(self): - """Get the time portion of this :class:`Timestamp`. - """ + def time(self) -> int: + """Get the time portion of this :class:`Timestamp`.""" return self.__time @property - def inc(self): - """Get the inc portion of this :class:`Timestamp`. - """ + def inc(self) -> int: + """Get the inc portion of this :class:`Timestamp`.""" return self.__inc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): - return (self.__time == other.time and self.__inc == other.inc) + return self.__time == other.time and self.__inc == other.inc else: return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return hash(self.time) ^ hash(self.inc) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) < (other.time, other.inc) return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) <= (other.time, other.inc) return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) > (other.time, other.inc) return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented - def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) + def __repr__(self) -> str: + return f"Timestamp({self.__time}, {self.__inc})" - def as_datetime(self): + def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. diff --git a/bson/typings.py b/bson/typings.py new file mode 100644 index 0000000000..b80c661454 --- /dev/null +++ b/bson/typings.py @@ -0,0 +1,31 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
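
A minimal sketch of the ``Timestamp`` behavior covered by the ``bson/timestamp.py`` hunk above; the new ``__slots__`` and type hints do not change the public API, and the values below are illustrative::

    import datetime
    from bson.timestamp import Timestamp

    ts = Timestamp(datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc), 1)
    assert ts.time == 1672531200 and ts.inc == 1   # the datetime is converted to epoch seconds
    assert ts < Timestamp(ts.time, 2)              # ordered by (time, inc)
    dt = ts.as_datetime()                          # timezone-aware datetime in UTC
    # With __slots__ defined, arbitrary attributes can no longer be attached:
    # "ts.foo = 1" would now raise AttributeError.
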
+ +"""Type aliases used by bson""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, TypeVar, Union + +if TYPE_CHECKING: + from array import array + from mmap import mmap + + from bson.raw_bson import RawBSONDocument + + +# Common Shared Types. +_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +_DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] diff --git a/bson/tz_util.py b/bson/tz_util.py index 6ec918fb2b..a21d3c1736 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -13,11 +13,12 @@ # limitations under the License. """Timezone related utilities for BSON.""" +from __future__ import annotations -from datetime import (timedelta, - tzinfo) +from datetime import datetime, timedelta, tzinfo +from typing import Optional, Tuple, Union -ZERO = timedelta(0) +ZERO: timedelta = timedelta(0) class FixedOffset(tzinfo): @@ -28,25 +29,25 @@ class FixedOffset(tzinfo): Defining __getinitargs__ enables pickling / copying. """ - def __init__(self, offset, name): + def __init__(self, offset: Union[float, timedelta], name: str) -> None: if isinstance(offset, timedelta): self.__offset = offset else: self.__offset = timedelta(minutes=offset) self.__name = name - def __getinitargs__(self): + def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name - def utcoffset(self, dt): + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset - def tzname(self, dt): + def tzname(self, dt: Optional[datetime]) -> str: return self.__name - def dst(self, dt): + def dst(self, dt: Optional[datetime]) -> timedelta: return ZERO -utc = FixedOffset(0, "UTC") +utc: FixedOffset = FixedOffset(0, "UTC") """Fixed offset timezone representing UTC.""" diff --git a/cdecimal_test.py b/cdecimal_test.py deleted file mode 100644 index 88262ffd98..0000000000 --- a/cdecimal_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2017 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test PyMongo with cdecimal monkey-patched over stdlib decimal.""" - -import getopt -import sys - -try: - import cdecimal - _HAVE_CDECIMAL = True -except ImportError: - _HAVE_CDECIMAL = False - - -def run(args): - """Run tests with cdecimal monkey-patched over stdlib decimal.""" - # Monkey-patch. - sys.modules['decimal'] = cdecimal - - # Run the tests. 
- sys.argv[:] = ['setup.py', 'test'] + list(args) - import setup - - -def main(): - """Parse options and run tests.""" - usage = """python %s - -Test PyMongo with cdecimal monkey-patched over decimal.""" % (sys.argv[0],) - - try: - opts, args = getopt.getopt( - sys.argv[1:], "h", ["help"]) - except getopt.GetoptError as err: - print(str(err)) - print(usage) - sys.exit(2) - - for option_name, _ in opts: - if option_name in ("-h", "--help"): - print(usage) - sys.exit() - else: - assert False, "unhandled option" - - if not _HAVE_CDECIMAL: - print("The cdecimal package is not installed.") - sys.exit(1) - - run(args) # Command line args to setup.py, like what test to run. - - -if __name__ == '__main__': - main() diff --git a/doc/Makefile b/doc/Makefile index 9fa6e3a48c..d4bb2cbb9e 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,89 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . BUILDDIR = _build -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." 
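
For context on the ``bson/tz_util.py`` hunk above: the new annotations do not change runtime behavior, and ``FixedOffset`` works like any other ``tzinfo``. A minimal sketch (offset and name are illustrative)::

    from datetime import datetime
    from bson.tz_util import FixedOffset, utc

    ist = FixedOffset(330, "IST")                    # offset is given in minutes
    local = datetime(2024, 1, 1, 12, 0, tzinfo=ist)
    assert local.utcoffset().total_seconds() == 330 * 60
    assert local.astimezone(utc).hour == 6           # 12:00 IST is 06:30 UTC
    # __getinitargs__ keeps FixedOffset instances picklable and copyable.
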
- -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyMongo.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyMongo.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." +.PHONY: help Makefile -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index ed7851bdef..6141284a48 100644 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -4,14 +4,14 @@ {% if theme_googletag %} - + })(window,document,'script','dataLayer','GTM-GDFN'); {% endif %} diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index ab9d58f819..c933a687b9 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -14,12 +14,13 @@ .. autodata:: JAVA_LEGACY .. autodata:: CSHARP_LEGACY .. autodata:: MD5_SUBTYPE + .. autodata:: COLUMN_SUBTYPE + .. autodata:: SENSITIVE_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE - .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) + .. autoclass:: UuidRepresentation :members: - :show-inheritance: - .. autoclass:: UUIDLegacy(obj) + .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst new file mode 100644 index 0000000000..1afaad69fc --- /dev/null +++ b/doc/api/bson/datetime_ms.rst @@ -0,0 +1,6 @@ +:mod:`datetime_ms` -- Support for BSON UTC Datetime +=================================================== + +.. automodule:: bson.datetime_ms + :synopsis: Support for BSON UTC datetimes. + :members: diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 5f15ed99eb..d5b69607de 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -3,7 +3,7 @@ .. automodule:: bson :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: + :members: BSON, decode, decode_all, decode_file_iter, decode_iter, encode, gen_list_name, has_c, is_valid Sub-modules: @@ -13,6 +13,7 @@ Sub-modules: binary code codec_options + datetime_ms dbref decimal128 errors diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst index 6764ef622b..b81fbde782 100644 --- a/doc/api/gridfs/index.rst +++ b/doc/api/gridfs/index.rst @@ -3,7 +3,7 @@ .. 
automodule:: gridfs :synopsis: Tools for working with GridFS - :members: + :members: GridFS, GridFSBucket Sub-modules: diff --git a/doc/api/index.rst b/doc/api/index.rst index 64c407fd04..30ae3608ca 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -6,7 +6,7 @@ interacting with MongoDB. :mod:`bson` is an implementation of the `BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS -`_ storage +`_ storage specification. .. toctree:: diff --git a/doc/api/pymongo/bulk.rst b/doc/api/pymongo/bulk.rst deleted file mode 100644 index 0d597c26df..0000000000 --- a/doc/api/pymongo/bulk.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`bulk` -- The bulk write operations interface -================================================== - -.. automodule:: pymongo.bulk - :synopsis: The bulk write operations interface. - :members: diff --git a/doc/api/pymongo/client_options.rst b/doc/api/pymongo/client_options.rst new file mode 100644 index 0000000000..3ffc10bad6 --- /dev/null +++ b/doc/api/pymongo/client_options.rst @@ -0,0 +1,7 @@ +:mod:`client_options` -- Read only configuration options for a MongoClient. +=========================================================================== + +.. automodule:: pymongo.client_options + + .. autoclass:: pymongo.client_options.ClientOptions() + :members: diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index d7b1f0dd8b..a75c0ac586 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -7,7 +7,6 @@ .. autodata:: pymongo.ASCENDING .. autodata:: pymongo.DESCENDING .. autodata:: pymongo.GEO2D - .. autodata:: pymongo.GEOHAYSTACK .. autodata:: pymongo.GEOSPHERE .. autodata:: pymongo.HASHED .. autodata:: pymongo.TEXT @@ -47,12 +46,12 @@ .. automethod:: aggregate .. automethod:: aggregate_raw_batches .. automethod:: watch - .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None) - .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) .. 
automethod:: find_one(filter=None, *args, **kwargs) .. automethod:: find_one_and_delete - .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, session=None, **kwargs) - .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, session=None, **kwargs) + .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) + .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, **kwargs) .. automethod:: count_documents .. automethod:: estimated_document_count .. automethod:: distinct @@ -60,22 +59,13 @@ .. automethod:: create_indexes .. automethod:: drop_index .. automethod:: drop_indexes - .. automethod:: reindex .. automethod:: list_indexes .. automethod:: index_information + .. automethod:: create_search_index + .. automethod:: create_search_indexes + .. automethod:: drop_search_index + .. automethod:: list_search_indexes + .. automethod:: update_search_index .. automethod:: drop .. automethod:: rename .. automethod:: options - .. automethod:: map_reduce - .. automethod:: inline_map_reduce - .. automethod:: parallel_scan - .. automethod:: initialize_unordered_bulk_op - .. automethod:: initialize_ordered_bulk_op - .. automethod:: group - .. automethod:: count - .. automethod:: insert(doc_or_docs, manipulate=True, check_keys=True, continue_on_error=False, **kwargs) - .. automethod:: save(to_save, manipulate=True, check_keys=True, **kwargs) - .. automethod:: update(spec, document, upsert=False, manipulate=False, multi=False, check_keys=True, **kwargs) - .. automethod:: remove(spec_or_id=None, multi=True, **kwargs) - .. automethod:: find_and_modify - .. automethod:: ensure_index diff --git a/doc/api/pymongo/cursor.rst b/doc/api/pymongo/cursor.rst index 4b9943f9a2..513f051abb 100644 --- a/doc/api/pymongo/cursor.rst +++ b/doc/api/pymongo/cursor.rst @@ -15,13 +15,13 @@ .. autoattribute:: EXHAUST :annotation: - .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) :members: .. describe:: c[index] - See :meth:`__getitem__`. + See :meth:`__getitem__` and read the warning. .. automethod:: __getitem__ - .. 
autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) diff --git a/doc/api/pymongo/cursor_manager.rst b/doc/api/pymongo/cursor_manager.rst deleted file mode 100644 index 8851b66f70..0000000000 --- a/doc/api/pymongo/cursor_manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`cursor_manager` -- Managers to handle when cursors are killed after being closed -====================================================================================== - -.. automodule:: pymongo.cursor_manager - :synopsis: Managers to handle when cursors are killed after being closed - :members: diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index b6b0aeba58..b40a77dff3 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -5,9 +5,6 @@ :synopsis: Database level operations .. autodata:: pymongo.auth.MECHANISMS - .. autodata:: pymongo.OFF - .. autodata:: pymongo.SLOW_ONLY - .. autodata:: pymongo.ALL .. autoclass:: pymongo.database.Database :members: @@ -27,7 +24,3 @@ .. autoattribute:: read_preference .. autoattribute:: write_concern .. autoattribute:: read_concern - - - .. autoclass:: pymongo.database.SystemJS - :members: diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst index 08bfc157a9..b8a886ea68 100644 --- a/doc/api/pymongo/encryption_options.rst +++ b/doc/api/pymongo/encryption_options.rst @@ -3,6 +3,4 @@ .. automodule:: pymongo.encryption_options :synopsis: Support for automatic client-side field level encryption - - .. autoclass:: pymongo.encryption_options.AutoEncryptionOpts - :members: + :members: diff --git a/doc/api/pymongo/event_loggers.rst b/doc/api/pymongo/event_loggers.rst new file mode 100644 index 0000000000..9be0779c20 --- /dev/null +++ b/doc/api/pymongo/event_loggers.rst @@ -0,0 +1,7 @@ +:mod:`event_loggers` -- Example loggers +=========================================== + + +.. automodule:: pymongo.event_loggers + :synopsis: A collection of simple listeners for monitoring driver events. + :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 1b6dedfa8d..625c138170 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -9,10 +9,6 @@ Alias for :class:`pymongo.mongo_client.MongoClient`. - .. data:: MongoReplicaSetClient - - Alias for :class:`pymongo.mongo_replica_set_client.MongoReplicaSetClient`. - .. data:: ReadPreference Alias for :class:`pymongo.read_preferences.ReadPreference`. @@ -26,33 +22,35 @@ The maximum wire protocol version PyMongo supports. + .. autofunction:: timeout + Sub-modules: .. 
toctree:: :maxdepth: 2 - bulk change_stream + client_options client_session collation collection command_cursor cursor - cursor_manager database driver_info encryption encryption_options errors - message mongo_client - mongo_replica_set_client monitoring operations pool read_concern read_preferences results - son_manipulator + server_api + server_description + topology_description uri_parser write_concern + event_loggers diff --git a/doc/api/pymongo/ismaster.rst b/doc/api/pymongo/ismaster.rst deleted file mode 100644 index 881e874e17..0000000000 --- a/doc/api/pymongo/ismaster.rst +++ /dev/null @@ -1,10 +0,0 @@ -:orphan: - -:mod:`ismaster` -- A wrapper for ismaster command responses. -============================================================ - -.. automodule:: pymongo.ismaster - - .. autoclass:: pymongo.ismaster.IsMaster(doc) - - .. autoattribute:: document diff --git a/doc/api/pymongo/message.rst b/doc/api/pymongo/message.rst deleted file mode 100644 index 0a28052fb6..0000000000 --- a/doc/api/pymongo/message.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`message` -- Tools for creating messages to be sent to MongoDB -=================================================================== - -.. automodule:: pymongo.message - :synopsis: Tools for creating messages to be sent to MongoDB - :members: diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index ed110e8f93..83dab27f2c 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -14,38 +14,24 @@ Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - .. autoattribute:: event_listeners + .. autoattribute:: topology_description .. autoattribute:: address .. autoattribute:: primary .. autoattribute:: secondaries .. autoattribute:: arbiters .. autoattribute:: is_primary .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size - .. autoattribute:: min_pool_size - .. autoattribute:: max_idle_time_ms .. autoattribute:: nodes - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: max_write_batch_size - .. autoattribute:: local_threshold_ms - .. autoattribute:: server_selection_timeout .. autoattribute:: codec_options .. autoattribute:: read_preference .. autoattribute:: write_concern .. autoattribute:: read_concern - .. autoattribute:: is_locked + .. autoattribute:: options .. automethod:: start_session .. automethod:: list_databases .. automethod:: list_database_names - .. automethod:: database_names .. automethod:: drop_database .. automethod:: get_default_database .. automethod:: get_database .. automethod:: server_info - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager .. automethod:: watch - .. automethod:: fsync - .. automethod:: unlock diff --git a/doc/api/pymongo/mongo_replica_set_client.rst b/doc/api/pymongo/mongo_replica_set_client.rst deleted file mode 100644 index b92e53186a..0000000000 --- a/doc/api/pymongo/mongo_replica_set_client.rst +++ /dev/null @@ -1,33 +0,0 @@ -:mod:`mongo_replica_set_client` -- Tools for connecting to a MongoDB replica set -================================================================================ - -.. automodule:: pymongo.mongo_replica_set_client - :synopsis: Tools for connecting to a MongoDB replica set - - .. autoclass:: pymongo.mongo_replica_set_client.MongoReplicaSetClient(hosts_or_uri, document_class=dict, tz_aware=False, connect=True, **kwargs) - - .. automethod:: close - - .. 
describe:: c[db_name] || c.db_name - - Get the `db_name` :class:`~pymongo.database.Database` on :class:`MongoReplicaSetClient` `c`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - - .. autoattribute:: primary - .. autoattribute:: secondaries - .. autoattribute:: arbiters - .. autoattribute:: max_pool_size - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: local_threshold_ms - .. autoattribute:: codec_options - .. autoattribute:: read_preference - .. autoattribute:: write_concern - .. automethod:: database_names - .. automethod:: drop_database - .. automethod:: get_database - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager - .. automethod:: get_default_database diff --git a/doc/api/pymongo/pool.rst b/doc/api/pymongo/pool.rst index 4e37de4a35..78274e8f8b 100644 --- a/doc/api/pymongo/pool.rst +++ b/doc/api/pymongo/pool.rst @@ -2,5 +2,6 @@ ============================================================== .. automodule:: pymongo.pool - :synopsis: Pool module for use with a MongoDB client. - :members: + + .. autoclass:: pymongo.pool.PoolOptions() + :members: diff --git a/doc/api/pymongo/server_api.rst b/doc/api/pymongo/server_api.rst new file mode 100644 index 0000000000..de74411aa4 --- /dev/null +++ b/doc/api/pymongo/server_api.rst @@ -0,0 +1,11 @@ +:mod:`server_api` -- Support for MongoDB Stable API +====================================================== + +.. automodule:: pymongo.server_api + :synopsis: Support for MongoDB Stable API + + .. autoclass:: pymongo.server_api.ServerApi + :members: + + .. autoclass:: pymongo.server_api.ServerApiVersion + :members: diff --git a/doc/api/pymongo/server_description.rst b/doc/api/pymongo/server_description.rst index 2d354fca6f..fc6b55ec74 100644 --- a/doc/api/pymongo/server_description.rst +++ b/doc/api/pymongo/server_description.rst @@ -6,8 +6,4 @@ .. automodule:: pymongo.server_description .. autoclass:: pymongo.server_description.ServerDescription() - - .. autoattribute:: address - .. autoattribute:: all_hosts - .. autoattribute:: server_type - .. autoattribute:: server_type_name + :members: diff --git a/doc/api/pymongo/son_manipulator.rst b/doc/api/pymongo/son_manipulator.rst deleted file mode 100644 index 87503e6f83..0000000000 --- a/doc/api/pymongo/son_manipulator.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`son_manipulator` -- Manipulators that can edit SON documents as they are saved or retrieved -================================================================================================= - -.. automodule:: pymongo.son_manipulator - :synopsis: Manipulators that can edit SON documents as they are saved or retrieved - :members: diff --git a/doc/api/pymongo/topology_description.rst b/doc/api/pymongo/topology_description.rst index b14f1bf2c2..24353db2a9 100644 --- a/doc/api/pymongo/topology_description.rst +++ b/doc/api/pymongo/topology_description.rst @@ -6,9 +6,4 @@ .. automodule:: pymongo.topology_description .. autoclass:: pymongo.topology_description.TopologyDescription() - - .. automethod:: has_readable_server(read_preference=ReadPreference.PRIMARY) - .. automethod:: has_writable_server - .. automethod:: server_descriptions - .. autoattribute:: topology_type - .. 
autoattribute:: topology_type_name
+   :members:
diff --git a/doc/atlas.rst b/doc/atlas.rst
index bb661e8592..19ba9732f2 100644
--- a/doc/atlas.rst
+++ b/doc/atlas.rst
@@ -7,24 +7,7 @@ Atlas to :class:`~pymongo.mongo_client.MongoClient`::

     client = pymongo.MongoClient()

-Connections to Atlas require TLS/SSL. For connections using TLS/SSL, PyMongo
-may require third party dependencies as determined by your version of Python.
-With PyMongo 3.3+, you can install PyMongo 3.3+ and any TLS/SSL-related
-dependencies using the following pip command::
-
-  $ python -m pip install pymongo[tls]
-
-Earlier versions of PyMongo require you to manually install the dependencies.
-For a list of TLS/SSL-related dependencies, see :doc:`examples/tls`.
-
-.. note:: Connecting to Atlas "Free Tier" or "Shared Cluster" instances
-  requires Server Name Indication (SNI) support. SNI support requires CPython
-  2.7.9 / PyPy 2.5.1 or newer. To check if your version of Python supports
-  SNI run the following command::
-
-    $ python -c "import ssl; print(getattr(ssl, 'HAS_SNI', False))"
-
-  You should see "True".
+Connections to Atlas require TLS/SSL.

 .. warning:: Industry best practices recommend, and some regulations require,
   the use of TLS 1.1 or newer. Though no application changes are required for
@@ -52,10 +35,9 @@ For a list of TLS/SSL-related dependencies, see :doc:`examples/tls`.

   You can read more about TLS versions and their security implications here:

-  ``_
+  ``_

 .. _python.org: https://www.python.org/downloads/
 .. _homebrew: https://brew.sh/
 .. _macports: https://www.macports.org/
 .. _requests: https://pypi.python.org/pypi/requests
-
diff --git a/doc/changelog.rst b/doc/changelog.rst
index f8ed9f702f..d823828eff 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,6 +1,1020 @@
 Changelog
 =========

+Changes in Version 4.6.2
+------------------------
+
+PyMongo 4.6.2 fixes the following bug:
+
+- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown"
+  could be written to stderr when a MongoClient's thread starts as the Python interpreter is shutting down.
+
+Changes in Version 4.6.1
+------------------------
+
+PyMongo 4.6.1 fixes the following bug:
+
+- Ensure retryable read ``OperationFailure`` errors re-raise the exception when a 0 or ``None`` error code is provided.
+
+Changes in Version 4.6
+----------------------
+
+PyMongo 4.6 brings a number of improvements including:
+
+- Added the ``serverMonitoringMode`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`.
+- Improved client performance and reduced connection requirements in Function-as-a-service (FaaS)
+  environments like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions.
+- Added the :attr:`pymongo.monitoring.CommandSucceededEvent.database_name` property.
+- Added the :attr:`pymongo.monitoring.CommandFailedEvent.database_name` property.
+- Allow passing a ``dict`` to sort/create_index/hint.
+- Added :func:`repr` support to the write result classes:
+  :class:`~pymongo.results.BulkWriteResult`,
+  :class:`~pymongo.results.DeleteResult`,
+  :class:`~pymongo.results.InsertManyResult`,
+  :class:`~pymongo.results.InsertOneResult`,
+  :class:`~pymongo.results.UpdateResult`, and
+  :class:`~pymongo.encryption.RewrapManyDataKeyResult`.
+  For example:
+
+    >>> client.t.t.insert_one({})
+    InsertOneResult(ObjectId('65319acdd55bb3a27ab5502b'), acknowledged=True)
+    >>> client.t.t.insert_many([{} for _ in range(3)])
+    InsertManyResult([ObjectId('6532f85e826f2b6125d6ce39'), ObjectId('6532f85e826f2b6125d6ce3a'), ObjectId('6532f85e826f2b6125d6ce3b')], acknowledged=True)
+
+- :meth:`~pymongo.uri_parser.parse_uri` now considers the delimiting slash (``/``)
+  between hosts and connection options optional. For example,
+  "mongodb://example.com?tls=true" is now a valid URI.
+- Fixed a bug where PyMongo would incorrectly promote all cursors to exhaust cursors
+  when connected to load balanced MongoDB clusters or Serverless clusters.
+- Added the :ref:`network-compression-example` documentation page.
+- Added more timeout information to network errors.
+
+Changes in Version 4.5
+----------------------
+
+PyMongo 4.5 brings a number of improvements including:
+
+- Added new helper methods for Atlas Search Index (requires MongoDB Server 7.0+):
+  :meth:`~pymongo.collection.Collection.list_search_indexes`,
+  :meth:`~pymongo.collection.Collection.create_search_index`,
+  :meth:`~pymongo.collection.Collection.create_search_indexes`,
+  :meth:`~pymongo.collection.Collection.drop_search_index`,
+  :meth:`~pymongo.collection.Collection.update_search_index`
+- Added :meth:`~pymongo.database.Database.cursor_command`
+  and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support
+  executing an arbitrary command that returns a cursor.
+- ``cryptography`` 2.5 or later is now required for :ref:`OCSP` support.
+- Improved bson encoding and decoding performance by up to 134% (`PYTHON-3729`_, `PYTHON-3797`_, `PYTHON-3816`_, `PYTHON-3817`_, `PYTHON-3820`_, `PYTHON-3824`_, and `PYTHON-3846`_).
+
+.. warning:: PyMongo no longer supports PyPy3 versions older than 3.8. Users
+  must upgrade to PyPy3.8+.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.5 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=35492
+
+.. _PYTHON-3729: https://jira.mongodb.org/browse/PYTHON-3729
+.. _PYTHON-3797: https://jira.mongodb.org/browse/PYTHON-3797
+.. _PYTHON-3816: https://jira.mongodb.org/browse/PYTHON-3816
+.. _PYTHON-3817: https://jira.mongodb.org/browse/PYTHON-3817
+.. _PYTHON-3820: https://jira.mongodb.org/browse/PYTHON-3820
+.. _PYTHON-3824: https://jira.mongodb.org/browse/PYTHON-3824
+.. _PYTHON-3846: https://jira.mongodb.org/browse/PYTHON-3846
+
+Changes in Version 4.4.1
+------------------------
+
+Version 4.4.1 fixes the following bugs:
+
+- Fixed a bug where pymongo would raise a ``ConfigurationError: Invalid SRV host``
+  error when connecting to a "mongodb+srv://" URI that included capital letters
+  in the SRV hosts returned from DNS (`PYTHON-3800`_).
+- Fixed a minor reference counting bug in the C extension (`PYTHON-3798`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.4.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3798: https://jira.mongodb.org/browse/PYTHON-3798
+.. _PYTHON-3800: https://jira.mongodb.org/browse/PYTHON-3800
+.. _PyMongo 4.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36329
+
+Changes in Version 4.4
+----------------------
+
+PyMongo 4.4 brings a number of improvements including:
+
+- Added support for MongoDB 7.0.
+- Added support for Python 3.11.
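
A minimal sketch of the Atlas Search Index helpers listed under 4.5 above, assuming an Atlas cluster on MongoDB 7.0+ (the ``test.movies`` namespace is hypothetical)::

    from pymongo import MongoClient
    from pymongo.operations import SearchIndexModel

    coll = MongoClient().test.movies                 # hypothetical namespace
    model = SearchIndexModel(definition={"mappings": {"dynamic": True}}, name="default")
    name = coll.create_search_index(model)           # returns the new index's name
    for index in coll.list_search_indexes():
        print(index["name"])
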
+- Added support for passing a list containing (key, direction) pairs
+  or keys to :meth:`~pymongo.collection.Collection.create_index`.
+- Improved bson encoding performance (`PYTHON-3717`_ and `PYTHON-3718`_).
+- Improved support for Pyright, improving typing support for IDEs like Visual Studio Code
+  or Visual Studio.
+- Improved support for type-checking with MyPy "strict" mode (``--strict``).
+- Added :meth:`~pymongo.encryption.ClientEncryption.create_encrypted_collection`,
+  :class:`~pymongo.errors.EncryptedCollectionError`,
+  :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression`,
+  :class:`~pymongo.encryption_options.RangeOpts`,
+  and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental
+  Queryable Encryption beta.
+- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB
+  Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking
+  advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and
+  PyMongo 4.4+.
+- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and
+  :meth:`datetime.datetime.utcfromtimestamp`. ``utcnow`` and ``utcfromtimestamp`` are deprecated
+  in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should
+  use :meth:`datetime.datetime.now(tz=timezone.utc)` and
+  :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)`.
+
+.. _in this Github issue: https://github.com/python/cpython/issues/103857
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.4 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354
+
+.. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717
+.. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718
+
+Changes in Version 4.3.3
+------------------------
+
+Version 4.3.3 documents support for the following:
+
+- :ref:`CSFLE on-demand credentials` for cloud KMS providers.
+- Authentication support for :ref:`EKS Clusters`.
+- Added the :ref:`timeout-example` example page to improve the documentation
+  for :func:`pymongo.timeout`.
+
+Bug Fixes
+.........
+- Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream`
+  and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks
+  instead of line by line (`PYTHON-3502`_).
+- Improved performance of :meth:`gridfs.grid_file.GridOut.read` and
+  :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.3.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3502: https://jira.mongodb.org/browse/PYTHON-3502
+.. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508
+.. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709
+
+Changes in Version 4.3 (4.3.2)
+------------------------------
+
+Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a
+version handling error and a necessary documentation update.
+
+`dnspython `_ is now a required
+dependency. This change makes PyMongo easier to install for use with "mongodb+srv://"
+connection strings and `MongoDB Atlas `_.
+
+PyMongo 4.3 brings a number of improvements including:
+
+- Added support for decoding BSON datetimes outside of the range supported
+  by Python's :class:`~datetime.datetime` builtin.
See
+  :ref:`handling-out-of-range-datetimes` for examples, as well as
+  :class:`bson.datetime_ms.DatetimeMS`,
+  :class:`bson.codec_options.DatetimeConversion`, and
+  :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion``
+  parameter for more details (`PYTHON-1824`_).
+- PyMongo now resets its locks and other shared state in the child process
+  after a :py:func:`os.fork` to reduce the frequency of deadlocks. Note that
+  deadlocks are still possible because libraries that PyMongo depends on, like
+  OpenSSL, cannot be made fork() safe in multithreaded applications
+  (`PYTHON-2484`_). For more info see :ref:`pymongo-fork-safe`.
+- When used with MongoDB 6.0+, instances of :class:`~pymongo.change_stream.ChangeStream`
+  now allow for new types of events (such as DDL and C2C replication events)
+  to be recorded with the new parameter ``show_expanded_events``
+  that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`.
+- PyMongo now internally caches AWS credentials that it fetches from AWS
+  endpoints, to avoid rate limitations. The cache is cleared when the
+  credentials expire or an error is encountered.
+- When using the ``MONGODB-AWS`` authentication mechanism with the
+  ``aws`` extra, the behavior of credential fetching has changed with
+  ``pymongo_auth_aws>=1.1.0``. Please see :doc:`examples/authentication` for
+  more information.
+
+Bug fixes
+.........
+
+- Fixed a bug where :class:`~pymongo.change_stream.ChangeStream`
+  would allow an app to retry calling ``next()`` or ``try_next()`` even
+  after non-resumable errors (`PYTHON-3389`_).
+- Fixed a bug where the client could be unable to discover the new primary
+  after a simultaneous replica set election and reconfig (`PYTHON-2970`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824
+.. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484
+.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970
+.. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389
+.. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425
+
+Changes in Version 4.2
+----------------------
+
+.. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required.
+
+PyMongo 4.2 brings a number of improvements including:
+
+- Support for MongoDB 6.0.
+- Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking
+  changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage.
+- Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout
+  to an entire block of pymongo operations. See :ref:`timeout-example` for examples;
+  a short sketch also appears below.
+- Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`.
+- Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when
+  the error was caused by a timeout.
+- Added the ``check_exists`` argument to :meth:`~pymongo.database.Database.create_collection`
+  that when True (the default) runs an additional ``listCollections`` command to verify that the
+  collection does not exist already.
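
A minimal sketch of the provisional :func:`pymongo.timeout` API described above (the database and collection names are hypothetical)::

    import pymongo
    from pymongo.errors import PyMongoError

    client = pymongo.MongoClient()
    try:
        with pymongo.timeout(5):                     # seconds; covers the whole block
            client.db.coll.insert_one({"x": 1})
            client.db.coll.find_one({"x": 1})
    except PyMongoError as exc:
        if exc.timeout:                              # True when the failure was a timeout
            print("block timed out")
        else:
            raise
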
+- Added the following key management APIs to :class:`~pymongo.encryption.ClientEncryption`:
+
+  - :meth:`~pymongo.encryption.ClientEncryption.get_key`
+  - :meth:`~pymongo.encryption.ClientEncryption.get_keys`
+  - :meth:`~pymongo.encryption.ClientEncryption.delete_key`
+  - :meth:`~pymongo.encryption.ClientEncryption.add_key_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.get_key_by_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.remove_key_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.rewrap_many_data_key`
+  - :class:`~pymongo.encryption.RewrapManyDataKeyResult`
+
+- Support for the ``crypt_shared`` library to replace ``mongocryptd`` using the new
+  ``crypt_shared_lib_path`` and ``crypt_shared_lib_required`` arguments to
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`.
+
+Bug fixes
+.........
+
+- Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count`
+  would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_).
+- Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex`
+  objects. :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData` (`PYTHON-3048`_).
+- Fixed a bug that caused ``AutoReconnect("connection pool paused")`` errors in the child
+  process after fork (`PYTHON-3257`_).
+- Fixed a bug where :meth:`~pymongo.collection.Collection.count_documents` and
+  :meth:`~pymongo.collection.Collection.distinct` would fail in a transaction with
+  ``directConnection=True`` (`PYTHON-3333`_).
+- GridFS no longer uploads an incomplete files collection document after encountering an
+  error in the middle of an upload. This results in fewer
+  :class:`~gridfs.errors.CorruptGridFile` errors (`PYTHON-1552`_).
+- Renamed PyMongo's internal C extension methods to avoid crashing due to name conflicts
+  with mpi4py and other shared libraries (`PYTHON-2110`_).
+- Fixed tight CPU loop for network I/O when using PyOpenSSL (`PYTHON-3187`_).
+
+Unavoidable breaking changes
+............................
+
+- pymongocrypt 1.3.0 or later is now required for client side field level
+  encryption support.
+- :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses
+  the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB,
+  the count command was not included in V1 of the :ref:`versioned-api-ref`.
+  Users of the Stable API with estimated_document_count are recommended to upgrade
+  their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict`
+  to ``False`` to avoid encountering errors (`PYTHON-3167`_).
+- Removed generic typing from :class:`~pymongo.client_session.ClientSession` to improve
+  support for Pyright (`PYTHON-3283`_).
+- Added ``__all__`` to the bson, pymongo, and gridfs packages. This could be a breaking
+  change for apps that relied on ``from bson import *`` to import APIs not present in
+  ``__all__`` (`PYTHON-3311`_).
+
+.. _count: https://mongodb.com/docs/manual/reference/command/count/
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048
+.. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885
+.. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167
+.. _PYTHON-3257: https://jira.mongodb.org/browse/PYTHON-3257
+.. _PYTHON-3333: https://jira.mongodb.org/browse/PYTHON-3333
+.. _PYTHON-1552: https://jira.mongodb.org/browse/PYTHON-1552
+.. _PYTHON-2110: https://jira.mongodb.org/browse/PYTHON-2110
+.. _PYTHON-3283: https://jira.mongodb.org/browse/PYTHON-3283
+.. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311
+.. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187
+.. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196
+
+Changes in Version 4.1.1
+------------------------
+
+Version 4.1.1 fixes a number of bugs:
+
+- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a
+  ``codec_options`` argument (`PYTHON-3222`_).
+- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options``
+  as a keyword argument (`PYTHON-3222`_).
+- Fixed an oversight where type markers (py.typed files) were not included
+  in our release distributions (`PYTHON-3214`_).
+- Fixed a bug where pymongo would raise a "NameError: name sys is not defined"
+  exception when attempting to parse a "mongodb+srv://" URI when the dnspython
+  dependency was not installed (`PYTHON-3198`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3198: https://jira.mongodb.org/browse/PYTHON-3198
+.. _PYTHON-3214: https://jira.mongodb.org/browse/PYTHON-3214
+.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222
+.. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290
+
+Changes in Version 4.1
+----------------------
+
+.. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1; Python 3.6.2+ is now required.
+
+PyMongo 4.1 brings a number of improvements including:
+
+- Type Hinting support (formerly provided by `pymongo-stubs`_). See :doc:`examples/type_hints` for more information.
+- Added support for the ``comment`` parameter to all helpers. For example see
+  :meth:`~pymongo.collection.Collection.insert_one`.
+- Added support for the ``let`` parameter to
+  :meth:`~pymongo.collection.Collection.update_one`,
+  :meth:`~pymongo.collection.Collection.update_many`,
+  :meth:`~pymongo.collection.Collection.delete_one`,
+  :meth:`~pymongo.collection.Collection.delete_many`,
+  :meth:`~pymongo.collection.Collection.replace_one`,
+  :meth:`~pymongo.collection.Collection.aggregate`,
+  :meth:`~pymongo.collection.Collection.find_one_and_delete`,
+  :meth:`~pymongo.collection.Collection.find_one_and_replace`,
+  :meth:`~pymongo.collection.Collection.find_one_and_update`,
+  :meth:`~pymongo.collection.Collection.find`,
+  :meth:`~pymongo.collection.Collection.find_one`,
+  and :meth:`~pymongo.collection.Collection.bulk_write`.
+  ``let`` is a map of parameter names and values.
+  Parameters can then be accessed as variables in an aggregate expression
+  context; a short sketch appears below.
+- :meth:`~pymongo.collection.Collection.aggregate` now supports
+  $merge and $out executing on secondaries on MongoDB >=5.0.
+  aggregate() now always obeys the collection's :attr:`read_preference` on
+  MongoDB >= 5.0.
+- :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to
+  conform to the behavior of :meth:`io.IOBase.seek`.
+- Improved reuse of implicit sessions (`PYTHON-2956`_).
+
+Bug fixes
+.........
+
+- Fixed a bug that would cause SDAM heartbeat timeouts and connection churn on
+  AWS Lambda and other FaaS environments (`PYTHON-3186`_).
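
A minimal sketch of the ``let`` support described above (the namespace and field names are hypothetical; the server must support ``let``, i.e. MongoDB 5.0+)::

    from pymongo import MongoClient

    coll = MongoClient().test.prices                 # hypothetical namespace
    coll.update_many(
        {"$expr": {"$gt": ["$price", "$$cutoff"]}},  # "$$cutoff" is bound via let
        {"$set": {"expensive": True}},
        let={"cutoff": 100},
    )
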
+- Fixed a bug where :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.database.Database`, and :class:`~pymongo.collection.Collection`
+  mistakenly implemented :class:`typing.Iterable` (`PYTHON-3084`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619
+.. _PYTHON-2956: https://jira.mongodb.org/browse/PYTHON-2956
+.. _PYTHON-3084: https://jira.mongodb.org/browse/PYTHON-3084
+.. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186
+.. _pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs
+
+Changes in Version 4.0
+----------------------
+
+.. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5.
+
+.. warning:: PyMongo 4.0 drops support for MongoDB 2.6, 3.0, 3.2, and 3.4.
+
+.. warning:: PyMongo 4.0 changes the default value of the ``directConnection`` URI option and
+  keyword argument to :class:`~pymongo.mongo_client.MongoClient`
+  to ``False`` instead of ``None``, allowing for the automatic
+  discovery of replica sets. This means that if you
+  want a direct connection to a single server you must pass
+  ``directConnection=True`` as a URI option or keyword argument.
+  For more details, see the relevant section of the PyMongo 4.x migration
+  guide: :ref:`pymongo4-migration-direct-connection`.
+
+PyMongo 4.0 brings a number of improvements as well as some backward breaking
+changes. For example, all APIs deprecated in PyMongo 3.X have been removed.
+Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4`
+before upgrading from PyMongo 3.x.
+
+Breaking Changes in 4.0
+.......................
+
+- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6.2+ is now required.
+- The default uuid_representation for :class:`~bson.codec_options.CodecOptions`,
+  :class:`~bson.json_util.JSONOptions`, and
+  :class:`~pymongo.mongo_client.MongoClient` has been changed from
+  :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to
+  :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a
+  :class:`uuid.UUID` instance to BSON or JSON now produces an error by default.
+  See :ref:`handling-uuid-data-example` for details; a short sketch appears below.
+- Removed the ``waitQueueMultiple`` keyword argument to
+  :class:`~pymongo.mongo_client.MongoClient` and removed
+  :exc:`pymongo.errors.ExceededMaxWaiters`.
+- Removed the ``socketKeepAlive`` keyword argument to
+  :class:`~pymongo.mongo_client.MongoClient`.
+- Removed :meth:`pymongo.mongo_client.MongoClient.fsync`,
+  :meth:`pymongo.mongo_client.MongoClient.unlock`, and
+  :attr:`pymongo.mongo_client.MongoClient.is_locked`.
+- Removed :meth:`pymongo.mongo_client.MongoClient.database_names`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.max_message_size`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.event_listeners`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.max_pool_size`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.retry_writes`.
+- Removed :attr:`pymongo.mongo_client.MongoClient.retry_reads`.
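
A minimal sketch of the ``uuid_representation`` default change described above::

    import uuid

    import bson
    from bson.binary import UuidRepresentation
    from bson.codec_options import CodecOptions

    opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
    data = bson.encode({"id": uuid.uuid4()}, codec_options=opts)  # explicit representation: OK
    try:
        bson.encode({"id": uuid.uuid4()})            # default is now UNSPECIFIED
    except ValueError:
        pass                                         # native UUIDs now require an explicit representation
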
+- Removed :meth:`pymongo.database.Database.eval`, + :data:`pymongo.database.Database.system_js` and + :class:`pymongo.database.SystemJS`. +- Removed :meth:`pymongo.database.Database.collection_names`. +- Removed :meth:`pymongo.database.Database.current_op`. +- Removed :meth:`pymongo.database.Database.authenticate` and + :meth:`pymongo.database.Database.logout`. +- Removed :meth:`pymongo.database.Database.error`, + :meth:`pymongo.database.Database.last_status`, + :meth:`pymongo.database.Database.previous_error`, + :meth:`pymongo.database.Database.reset_error_history`. +- Removed :meth:`pymongo.database.Database.add_user` and + :meth:`pymongo.database.Database.remove_user`. +- Removed support for database profiler helpers + :meth:`~pymongo.database.Database.profiling_level`, + :meth:`~pymongo.database.Database.set_profiling_level`, + and :meth:`~pymongo.database.Database.profiling_info`. Instead, users + should run the `profile command`_ with the + :meth:`~pymongo.database.Database.command` helper directly. +- Removed :attr:`pymongo.OFF`, :attr:`pymongo.SLOW_ONLY`, and + :attr:`pymongo.ALL`. +- Removed :meth:`pymongo.collection.Collection.parallel_scan`. +- Removed :meth:`pymongo.collection.Collection.ensure_index`. +- Removed :meth:`pymongo.collection.Collection.reindex`. +- Removed :meth:`pymongo.collection.Collection.save`. +- Removed :meth:`pymongo.collection.Collection.insert`. +- Removed :meth:`pymongo.collection.Collection.update`. +- Removed :meth:`pymongo.collection.Collection.remove`. +- Removed :meth:`pymongo.collection.Collection.find_and_modify`. +- Removed :meth:`pymongo.collection.Collection.count`. +- Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`, + :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`, and + :class:`pymongo.bulk.BulkOperationBuilder`. Use + :meth:`pymongo.collection.Collection.bulk_write` instead. +- Removed :meth:`pymongo.collection.Collection.group`. +- Removed :meth:`pymongo.collection.Collection.map_reduce` and + :meth:`pymongo.collection.Collection.inline_map_reduce`. +- Removed the ``useCursor`` option for + :meth:`~pymongo.collection.Collection.aggregate`. +- Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use + :meth:`pymongo.cursor.Cursor.close` instead. +- Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. +- Removed :class:`pymongo.cursor_manager.CursorManager` and + :mod:`pymongo.cursor_manager`. +- Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. +- Removed :meth:`pymongo.cursor.Cursor.count`. +- Removed :mod:`pymongo.thread_util`. +- Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +- Removed :class:`~pymongo.ismaster.IsMaster`. + Use :class:`~pymongo.hello.Hello` instead. +- Removed :mod:`pymongo.son_manipulator`, + :class:`pymongo.son_manipulator.SONManipulator`, + :class:`pymongo.son_manipulator.ObjectIdInjector`, + :class:`pymongo.son_manipulator.ObjectIdShuffler`, + :class:`pymongo.son_manipulator.AutoReference`, + :class:`pymongo.son_manipulator.NamespaceInjector`, + :meth:`pymongo.database.Database.add_son_manipulator`, + :attr:`pymongo.database.Database.outgoing_copying_manipulators`, + :attr:`pymongo.database.Database.outgoing_manipulators`, + :attr:`pymongo.database.Database.incoming_copying_manipulators`, and + :attr:`pymongo.database.Database.incoming_manipulators`. 
+- Removed the ``manipulate`` and ``modifiers`` parameters from
+  :meth:`~pymongo.collection.Collection.find`,
+  :meth:`~pymongo.collection.Collection.find_one`,
+  :meth:`~pymongo.collection.Collection.find_raw_batches`, and
+  :meth:`~pymongo.cursor.Cursor`.
+- Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`,
+  :meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`,
+  :meth:`pymongo.message.query`, and :meth:`pymongo.message.update`.
+- Removed :exc:`pymongo.errors.NotMasterError`.
+  Use :exc:`pymongo.errors.NotPrimaryError` instead.
+- Removed :exc:`pymongo.errors.CertificateError`.
+- Removed :attr:`pymongo.GEOHAYSTACK`.
+- Removed :class:`bson.binary.UUIDLegacy`.
+- Removed :const:`bson.json_util.STRICT_JSON_OPTIONS`. Use
+  :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or
+  :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead.
+- Changed the default JSON encoding representation from legacy to relaxed.
+  The ``json_mode`` parameter for :const:`bson.json_util.dumps` now defaults to
+  :const:`~bson.json_util.RELAXED_JSON_OPTIONS`.
+- Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef`
+  to match the behavior outlined in the `DBRef specification`_ version 1.0.
+  Specifically, PyMongo now decodes a subdocument into a
+  :class:`~bson.dbref.DBRef` if and only if it contains both ``$ref`` and
+  ``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the
+  correct type. Otherwise the document is returned as normal. Previously, any
+  subdocument containing a ``$ref`` field would be decoded as a
+  :class:`~bson.dbref.DBRef`.
+- The "tls" install extra is no longer necessary or supported and will be
+  ignored by pip.
+- The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions`
+  now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads` now
+  decodes datetimes as naive by default. See :ref:`tz_aware_default_change` for more info.
+- The ``directConnection`` URI option and keyword argument to
+  :class:`~pymongo.mongo_client.MongoClient`
+  defaults to ``False`` instead of ``None``, allowing for the automatic
+  discovery of replica sets. This means that if you
+  want a direct connection to a single server you must pass
+  ``directConnection=True`` as a URI option or keyword argument.
+- The ``hint`` option is now required when using ``min`` or ``max`` queries
+  with :meth:`~pymongo.collection.Collection.find`.
+- ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class.
+- When providing a "mongodb+srv://" URI to the
+  :class:`~pymongo.mongo_client.MongoClient` constructor you can now use the
+  ``srvServiceName`` URI option to specify your own SRV service name.
+- :meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather
+  than a list.
+- Removed :meth:`bson.son.SON.iteritems`.
+- :class:`~pymongo.collection.Collection` and :class:`~pymongo.database.Database`
+  now raise an error when evaluated as a Boolean. Use the syntax
+  ``if collection is not None:`` or ``if database is not None:`` instead of
+  the previous syntax, which was simply ``if collection:`` or ``if database:``.
+  You must now explicitly compare with None.
+- :class:`~pymongo.mongo_client.MongoClient` cannot execute any operations
+  after being closed; previously it would simply reconnect. You must now
+  create a new instance instead.
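+
+  For example, a minimal sketch of the new behavior (the URI and names are
+  illustrative, not prescribed by the changelog)::
+
+      from pymongo import MongoClient
+
+      client = MongoClient("mongodb://localhost:27017")
+      client.close()
+      # After close(), further operations raise an error instead of
+      # transparently reconnecting; create a fresh client and use that.
+      client = MongoClient("mongodb://localhost:27017")
+      client.admin.command("ping")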
+- Classes :class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`,
+  :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`,
+  :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` all implement
+  ``__slots__`` now. This means that their attributes are fixed, and new
+  attributes cannot be added to them at runtime.
+- Empty projections (e.g. ``{}`` or ``[]``) for
+  :meth:`~pymongo.collection.Collection.find` and
+  :meth:`~pymongo.collection.Collection.find_one`
+  are passed to the server as-is rather than the previous behavior, which
+  substituted in a projection of ``{"_id": 1}``. This means that an empty
+  projection will now return the entire document, not just the ``"_id"`` field.
+- :class:`~pymongo.mongo_client.MongoClient` now raises a
+  :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed
+  into the ``hosts`` argument.
+- :class:`~pymongo.mongo_client.MongoClient` now raises an
+  :exc:`~pymongo.errors.InvalidURI` exception
+  when it encounters unescaped percent signs in the username and password while
+  parsing MongoDB URIs.
+- Comparing two :class:`~pymongo.mongo_client.MongoClient` instances now
+  uses a set of immutable properties rather than
+  :attr:`~pymongo.mongo_client.MongoClient.address`, which can change.
+- Removed the ``disable_md5`` parameter for :class:`~gridfs.GridFSBucket` and
+  :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details.
+- pymongocrypt 1.2.0 or later is now required for client side field level
+  encryption support.
+
+Notable improvements
+....................
+
+- Enhanced connection pooling to create connections more efficiently and
+  avoid connection storms.
+- Added the ``maxConnecting`` URI and
+  :class:`~pymongo.mongo_client.MongoClient` keyword argument.
+- :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword
+  argument ``srvMaxHosts`` that limits the number of mongos-like hosts a client
+  will connect to. More specifically, when a mongodb+srv:// connection string
+  resolves to more than ``srvMaxHosts`` hosts, the client will randomly
+  choose an ``srvMaxHosts``-sized subset of hosts.
+- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access
+  to a client's configuration options.
+- Support for the "kmip" KMS provider for client side field level encryption.
+  See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+  and :mod:`~pymongo.encryption`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463
+.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst
+
+Changes in Version 3.13.0
+-------------------------
+
+Version 3.13 provides an upgrade path to PyMongo 4.x. Most of the API changes
+from PyMongo 4.0 have been backported in a backward compatible way, allowing
+applications to be written against PyMongo >= 3.13, rather than PyMongo 3.x or
+PyMongo 4.x. See the `PyMongo 4 Migration Guide`_ for detailed examples.
+
+PyMongo 3.13 drops support for Python 3.4.
+
+Notable improvements
+....................
+
+- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access
+  to a client's configuration options.
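+
+  For example, a small sketch of reading a configured option back from a
+  client (the option and value shown are illustrative)::
+
+      from pymongo import MongoClient
+
+      client = MongoClient(maxPoolSize=25)
+      print(client.options.pool_options.max_pool_size)  # 25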
+
+Bug fixes
+.........
+
+- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a
+  ``codec_options`` argument (`PYTHON-3222`_).
+- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options``
+  as a keyword argument (`PYTHON-3222`_).
+
+Deprecations
+............
+
+- Deprecated :meth:`~pymongo.collection.Collection.map_reduce` and
+  :meth:`~pymongo.collection.Collection.inline_map_reduce`.
+  Use :meth:`~pymongo.collection.Collection.aggregate` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.event_listeners`.
+  Use :attr:`~pymongo.mongo_client.options.event_listeners` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_pool_size`.
+  Use :attr:`~pymongo.mongo_client.options.pool_options.max_pool_size` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`.
+  Use :attr:`~pymongo.mongo_client.options.pool_options.max_idle_time_seconds` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`.
+  Use :attr:`~pymongo.mongo_client.options.local_threshold_ms` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`.
+  Use :attr:`~pymongo.mongo_client.options.server_selection_timeout` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_writes`.
+  Use :attr:`~pymongo.mongo_client.options.retry_writes` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_reads`.
+  Use :attr:`~pymongo.mongo_client.options.retry_reads` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_bson_size`,
+  :attr:`pymongo.mongo_client.MongoClient.max_message_size`, and
+  :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers
+  were incorrect when in ``loadBalanced=true`` mode and ambiguous in clusters
+  with mixed versions. Use the `hello command`_ to get the authoritative
+  value from the remote server instead. Code like this::
+
+      max_bson_size = client.max_bson_size
+      max_message_size = client.max_message_size
+      max_write_batch_size = client.max_write_batch_size
+
+  can be changed to this::
+
+      doc = client.admin.command('hello')
+      max_bson_size = doc['maxBsonObjectSize']
+      max_message_size = doc['maxMessageSizeBytes']
+      max_write_batch_size = doc['maxWriteBatchSize']
+
+.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.13.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4 Migration Guide: https://pymongo.readthedocs.io/en/stable/migrate-to-pymongo4.html
+.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222
+.. _PyMongo 3.13.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31570
+
+Changes in Version 3.12.3
+-------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.3 fixes a bug that prevented :meth:`bson.json_util.loads` from
+decoding a document with a non-string "$regex" field (`PYTHON-3028`_).
+
+See the `PyMongo 3.12.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3028: https://jira.mongodb.org/browse/PYTHON-3028
+.. _PyMongo 3.12.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32505
+
+Changes in Version 3.12.2
+-------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.2 fixes a number of bugs:
+
+- Fixed a bug that prevented PyMongo from retrying bulk writes
+  after a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2984`_).
+- Fixed a bug that could cause the driver to hang during automatic
+  client side field level encryption (`PYTHON-3017`_).
+
+See the `PyMongo 3.12.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2984: https://jira.mongodb.org/browse/PYTHON-2984
+.. _PYTHON-3017: https://jira.mongodb.org/browse/PYTHON-3017
+.. _PyMongo 3.12.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32310
+
+Changes in Version 3.12.1
+-------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.1 fixes a number of bugs:
+
+- Fixed a bug that caused a multi-document transaction to fail when the first
+  operation was a large bulk write (>48MB) that required splitting a batched
+  write command (`PYTHON-2915`_).
+- Fixed a bug that caused the ``tlsDisableOCSPEndpointCheck`` URI option to
+  be applied incorrectly (`PYTHON-2866`_).
+
+See the `PyMongo 3.12.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2915: https://jira.mongodb.org/browse/PYTHON-2915
+.. _PYTHON-2866: https://jira.mongodb.org/browse/PYTHON-2866
+.. _PyMongo 3.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31527
+
+Changes in Version 3.12.0
+-------------------------
+
+.. warning:: PyMongo 3.12.0 deprecates support for Python 2.7, 3.4 and 3.5.
+   These Python versions will not be supported by PyMongo 4.
+
+.. warning:: PyMongo now allows insertion of documents with keys that include
+   dots ('.') or start with dollar signs ('$').
+
+- pymongocrypt 1.1.0 or later is now required for client side field level
+  encryption support.
+- Iterating over :class:`gridfs.grid_file.GridOut` now moves through
+  the file line by line instead of chunk by chunk, and does not
+  restart at the top for subsequent iterations on the same object.
+  Call ``seek(0)`` to reset the iterator.
+
+Notable improvements
+....................
+
+- Added support for MongoDB 5.0.
+- Support for MongoDB Stable API; see :class:`~pymongo.server_api.ServerApi`.
+- Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`).
+- Support for Azure and GCP KMS providers for client side field level
+  encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`,
+  and :mod:`~pymongo.encryption`.
+- Support for AWS authentication with temporary credentials when connecting to KMS
+  in client side field level encryption.
+- Support for connecting to load balanced MongoDB clusters via the new
+  ``loadBalanced`` URI option.
+- Support for creating timeseries collections via the ``timeseries`` and
+  ``expireAfterSeconds`` arguments to
+  :meth:`~pymongo.database.Database.create_collection`.
+- Added :attr:`pymongo.mongo_client.MongoClient.topology_description`.
+- Added hash support to :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.database.Database` and
+  :class:`~pymongo.collection.Collection` (`PYTHON-2466`_).
+- Improved the error message returned by
+  :meth:`~pymongo.collection.Collection.insert_many` when supplied with an
+  argument of incorrect type (`PYTHON-1690`_).
+- Added session and read concern support to
+  :meth:`~pymongo.collection.Collection.find_raw_batches`
+  and :meth:`~pymongo.collection.Collection.aggregate_raw_batches`.
+
+Bug fixes
+.........
+
+- Fixed a bug that could cause the driver to deadlock during automatic
+  client side field level encryption (`PYTHON-2472`_).
+- Fixed a potential deadlock when garbage collecting an unclosed exhaust
+  :class:`~pymongo.cursor.Cursor`.
+- Fixed a bug where using gevent.Timeout to time out an operation could
+  lead to a deadlock.
+- Fixed the following bug with Atlas Data Lake: when closing cursors,
+  PyMongo now sends killCursors with the namespace returned in the cursor's
+  initial command response.
+- Fixed a bug in :class:`~pymongo.cursor.RawBatchCursor` that caused it to
+  return an empty bytestring when the cursor contained no results. It now
+  raises :exc:`StopIteration` instead.
+
+Deprecations
+............
+
+- Deprecated support for Python 2.7, 3.4 and 3.5.
+- Deprecated support for database profiler helpers
+  :meth:`~pymongo.database.Database.profiling_level`,
+  :meth:`~pymongo.database.Database.set_profiling_level`,
+  and :meth:`~pymongo.database.Database.profiling_info`. Instead, users
+  should run the `profile command`_ with the
+  :meth:`~pymongo.database.Database.command` helper directly.
+- Deprecated :exc:`~pymongo.errors.NotMasterError`. Users should
+  use :exc:`~pymongo.errors.NotPrimaryError` instead.
+- Deprecated :class:`~pymongo.ismaster.IsMaster` and :mod:`~pymongo.ismaster`,
+  which will be removed in PyMongo 4.0 and are replaced by
+  :class:`~pymongo.hello.Hello` and :mod:`~pymongo.hello` which provide the
+  same API.
+- Deprecated the :mod:`pymongo.message` module.
+- Deprecated the ``ssl_keyfile`` and ``ssl_certfile`` URI options in favor
+  of ``tlsCertificateKeyFile`` (see :doc:`examples/tls`).
+
+.. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466
+.. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690
+.. _PYTHON-2472: https://jira.mongodb.org/browse/PYTHON-2472
+.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.12.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594
+
+Changes in Version 3.11.3
+-------------------------
+
+Issues Resolved
+...............
+
+Version 3.11.3 fixes a bug that prevented PyMongo from retrying writes after
+a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2452`_).
+
+See the `PyMongo 3.11.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2452: https://jira.mongodb.org/browse/PYTHON-2452
+.. _PyMongo 3.11.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30355
+
+Changes in Version 3.11.2
+-------------------------
+
+Issues Resolved
+...............
+
+Version 3.11.2 includes a number of bugfixes. Highlights include:
+
+- Fixed a memory leak caused by failing SDAM monitor checks on Python 3 (`PYTHON-2433`_).
+- Fixed a regression that changed the string representation of
+  :exc:`~pymongo.errors.BulkWriteError` (`PYTHON-2438`_).
+- Fixed a bug that made it impossible to use
+  :meth:`bson.codec_options.CodecOptions.with_options` and
+  :meth:`~bson.json_util.JSONOptions.with_options` on some early versions of
+  Python 3.4 and Python 3.5 due to a bug in the standard library implementation
+  of :meth:`collections.namedtuple._asdict` (`PYTHON-2440`_).
+- Fixed a bug that resulted in a :exc:`TypeError` exception when a PyOpenSSL
+  socket was configured with a timeout of ``None`` (`PYTHON-2443`_).
+
+See the `PyMongo 3.11.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. 
_PYTHON-2433: https://jira.mongodb.org/browse/PYTHON-2433 +.. _PYTHON-2438: https://jira.mongodb.org/browse/PYTHON-2438 +.. _PYTHON-2440: https://jira.mongodb.org/browse/PYTHON-2440 +.. _PYTHON-2443: https://jira.mongodb.org/browse/PYTHON-2443 +.. _PyMongo 3.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30315 + +Changes in Version 3.11.1 +------------------------- + +Version 3.11.1 adds support for Python 3.9 and includes a number of bugfixes. +Highlights include: + +- Support for Python 3.9. +- Initial support for Azure and GCP KMS providers for client side field level + encryption is in beta. See the docstring for + :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.encryption_options.AutoEncryptionOpts`, + and :mod:`~pymongo.encryption`. **Note: Backwards-breaking changes may be + made before the final release.** +- Fixed a bug where the :class:`bson.json_util.JSONOptions` API did not match + the :class:`bson.codec_options.CodecOptions` API due to the absence of + a :meth:`bson.json_util.JSONOptions.with_options` method. This method has now + been added. +- Fixed a bug which made it impossible to serialize + :class:`~pymongo.errors.BulkWriteError` instances using :mod:`pickle`. +- Fixed a bug wherein PyMongo did not always discard an implicit session after + encountering a network error. +- Fixed a bug where connections created in the background were not + authenticated. +- Fixed a memory leak in the :mod:`bson` module when using a + :class:`~bson.codec_options.TypeRegistry`. + +Issues Resolved +............... + +See the `PyMongo 3.11.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.11.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29997 + +Changes in Version 3.11.0 +------------------------- + +Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. +Highlights include: + +- Support for :ref:`OCSP` (Online Certificate Status Protocol). +- Support for `PyOpenSSL `_ as an + alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` + support. It will also be installed when using the "tls" extra if the + version of Python in use is older than 2.7.9. +- Support for the :ref:`MONGODB-AWS` authentication mechanism. +- Support for the ``directConnection`` URI option and kwarg to + :class:`~pymongo.mongo_client.MongoClient`. +- Support for speculative authentication attempts in connection handshakes + which reduces the number of network roundtrips needed to authenticate new + connections on MongoDB 4.4+. +- Support for creating collections in multi-document transactions with + :meth:`~pymongo.database.Database.create_collection` on MongoDB 4.4+. +- Added index hinting support to the + :meth:`~pymongo.collection.Collection.replace_one`, + :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, + :meth:`~pymongo.collection.Collection.find_one_and_replace`, + :meth:`~pymongo.collection.Collection.find_one_and_update`, + :meth:`~pymongo.collection.Collection.delete_one`, + :meth:`~pymongo.collection.Collection.delete_many`, and + :meth:`~pymongo.collection.Collection.find_one_and_delete` commands. 
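+
+  For example, a minimal sketch of passing a hint to an update (the
+  collection and the index name are hypothetical)::
+
+      from pymongo import MongoClient
+
+      coll = MongoClient().db.coll
+      coll.update_one(
+          {"status": "A"},
+          {"$set": {"processed": True}},
+          hint="status_1",  # assumes an index named "status_1" exists
+      )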
+- Added index hinting support to the
+  :class:`~pymongo.operations.ReplaceOne`,
+  :class:`~pymongo.operations.UpdateOne`,
+  :class:`~pymongo.operations.UpdateMany`,
+  :class:`~pymongo.operations.DeleteOne`, and
+  :class:`~pymongo.operations.DeleteMany` bulk operations.
+- Added support for :data:`bson.binary.UuidRepresentation.UNSPECIFIED` and
+  ``MongoClient(uuidRepresentation='unspecified')`` which will become the
+  default UUID representation starting in PyMongo 4.0. See
+  :ref:`handling-uuid-data-example` for details.
+- New methods :meth:`bson.binary.Binary.from_uuid` and
+  :meth:`bson.binary.Binary.as_uuid`.
+- Added the ``background`` parameter to
+  :meth:`pymongo.database.Database.validate_collection`. For a description
+  of this parameter see the MongoDB documentation for the `validate command`_.
+- Added the ``allow_disk_use`` parameter to
+  :meth:`pymongo.collection.Collection.find`.
+- Added the ``hedge`` parameter to
+  :class:`~pymongo.read_preferences.PrimaryPreferred`,
+  :class:`~pymongo.read_preferences.Secondary`,
+  :class:`~pymongo.read_preferences.SecondaryPreferred`, and
+  :class:`~pymongo.read_preferences.Nearest` to support disabling
+  (or explicitly enabling) hedged reads in MongoDB 4.4+.
+- Fixed a bug in change streams that could cause PyMongo to miss some change
+  documents when resuming a stream that was started without a resume token and
+  whose first batch did not contain any change documents.
+- Fixed a bug where using gevent.Timeout to time out an operation could
+  lead to a deadlock.
+
+Deprecations:
+
+- Deprecated the ``oplog_replay`` parameter to
+  :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the
+  server optimizes queries against the oplog collection without requiring
+  the user to set this flag.
+- Deprecated :meth:`pymongo.collection.Collection.reindex`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command
+  instead.
+- Deprecated :meth:`pymongo.mongo_client.MongoClient.fsync`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``fsync`` command
+  instead.
+- Deprecated :meth:`pymongo.mongo_client.MongoClient.unlock`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``fsyncUnlock`` command
+  instead. See the documentation for more information.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.is_locked`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``currentOp`` command
+  instead. See the documentation for more information.
+- Deprecated :class:`bson.binary.UUIDLegacy`. Use
+  :meth:`bson.binary.Binary.from_uuid` instead.
+
+Unavoidable breaking changes:
+
+- :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS` do not support
+  multi-document transactions. Running a GridFS operation in a transaction
+  now always raises the following error:
+  ``InvalidOperation: GridFS does not support multi-document transactions``
+
+.. _validate command: https://mongodb.com/docs/manual/reference/command/validate/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.11.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.11.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=24799
+
 Changes in Version 3.10.1 -------------------------
@@ -156,8 +1170,8 @@ Changes in Version 3.8.0 .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install Python 2.7 or newer from `Red Hat Software Collections - `_.
+ CentOS 6 users should install Python 2.7 or newer from `SCL `_ .. warning:: PyMongo no longer supports PyPy3 versions older than 3.5. Users @@ -203,9 +1217,9 @@ Changes in Version 3.8.0 is expected to require a :meth:`~pymongo.cursor.Cursor.hint` when using min/max starting in MongoDB 4.2. - Documented support for the uuidRepresentation URI option, which has been - supported since PyMongo 2.7. Valid values are `pythonLegacy` (the default), - `javaLegacy`, `csharpLegacy` and `standard`. New applications should consider - setting this to `standard` for cross language compatibility. + supported since PyMongo 2.7. Valid values are ``pythonLegacy`` (the default), + ``javaLegacy``, ``csharpLegacy`` and ``standard``. New applications should consider + setting this to ``standard`` for cross language compatibility. - :class:`~bson.raw_bson.RawBSONDocument` now validates that the ``bson_bytes`` passed in represent a single bson document. Earlier versions would mistakenly accept multiple bson documents. @@ -281,8 +1295,8 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: - Support for single replica set multi-document ACID transactions. See :ref:`transactions-ref`. -- Support for wire protocol compression. See the - :meth:`~pymongo.mongo_client.MongoClient` documentation for details. +- Support for wire protocol compression via the new ``compressors`` URI and keyword argument to + :meth:`~pymongo.mongo_client.MongoClient`. See :ref:`network-compression-example` for details. - Support for Python 3.7. - New count methods, :meth:`~pymongo.collection.Collection.count_documents` and :meth:`~pymongo.collection.Collection.estimated_document_count`. @@ -308,7 +1322,7 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: :ref:`PLAIN `, and :ref:`MONGODB-X509 ` mechanisms can also be used to avoid issues with OpenSSL in FIPS environments. - - MD5 checksums are now optional in GridFS. See the `disable_md5` option + - MD5 checksums are now optional in GridFS. See the ``disable_md5`` option of :class:`~gridfs.GridFS` and :class:`~gridfs.GridFSBucket`. - :class:`~bson.objectid.ObjectId` machine bytes are now hashed using `FNV-1a @@ -327,7 +1341,7 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: authentication mechanism defaults to $external. - wtimeoutMS is once again supported as a URI option. - When using unacknowledged write concern and connected to MongoDB server - version 3.6 or greater, the `bypass_document_validation` option is now + version 3.6 or greater, the ``bypass_document_validation`` option is now supported in the following write helpers: :meth:`~pymongo.collection.Collection.insert_one`, :meth:`~pymongo.collection.Collection.replace_one`, @@ -337,9 +1351,9 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: Deprecations: - Deprecated :meth:`pymongo.collection.Collection.count` and - :meth:`pymongo.cursor.Cursor.count`. These two methods use the `count` + :meth:`pymongo.cursor.Cursor.count`. These two methods use the ``count`` command and `may or may not be accurate - `_, + `_, depending on the options used and connected MongoDB topology. Use :meth:`~pymongo.collection.Collection.count_documents` instead. - Deprecated the snapshot option of :meth:`~pymongo.collection.Collection.find` @@ -347,7 +1361,7 @@ Deprecations: deprecated in MongoDB 3.6 and removed in MongoDB 4.0. - Deprecated the max_scan option of :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one`. The option was - deprecated in MongoDB 4.0. 
Use `maxTimeMS` instead. + deprecated in MongoDB 4.0. Use ``maxTimeMS`` instead. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`~pymongo.cursor.Cursor.close` instead. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.database_names`. Use
@@ -449,7 +1463,7 @@ Highlights include: Deprecations: -- The `useCursor` option for :meth:`~pymongo.collection.Collection.aggregate` +- The ``useCursor`` option for :meth:`~pymongo.collection.Collection.aggregate` is deprecated. The option was only necessary when upgrading from MongoDB 2.4 to MongoDB 2.6. MongoDB 2.4 is no longer supported. - The :meth:`~pymongo.database.Database.add_user` and
@@ -514,13 +1528,13 @@ Highlights include: - Increased the performance of using :class:`~bson.raw_bson.RawBSONDocument`. - Increased the performance of :meth:`~pymongo.mongo_client.MongoClient.database_names` by using the - `nameOnly` option for listDatabases when available. + ``nameOnly`` option for listDatabases when available. - Increased the performance of :meth:`~pymongo.collection.Collection.bulk_write` by reducing the memory overhead of :class:`~pymongo.operations.InsertOne`, :class:`~pymongo.operations.DeleteOne`, and :class:`~pymongo.operations.DeleteMany`. -- Added the `collation` option to :class:`~pymongo.operations.DeleteOne`, +- Added the ``collation`` option to :class:`~pymongo.operations.DeleteOne`, :class:`~pymongo.operations.DeleteMany`, :class:`~pymongo.operations.ReplaceOne`, :class:`~pymongo.operations.UpdateOne`, and
@@ -534,13 +1548,13 @@ Highlights include: Changes and Deprecations: -- :meth:`~pymongo.collection.Collection.find` has new options `return_key`, - `show_record_id`, `snapshot`, `hint`, `max_time_ms`, `max_scan`, `min`, `max`, - and `comment`. Deprecated the option `modifiers`. +- :meth:`~pymongo.collection.Collection.find` has new options ``return_key``, + ``show_record_id``, ``snapshot``, ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, + and ``comment``. Deprecated the option ``modifiers``. - Deprecated :meth:`~pymongo.collection.Collection.group`. The group command was deprecated in MongoDB 3.4 and is expected to be removed in MongoDB 3.6. Applications should use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` pipeline stage instead. - Deprecated :meth:`~pymongo.database.Database.authenticate`. Authenticating multiple users conflicts with support for logical sessions in MongoDB 3.6. To authenticate as multiple users, create multiple instances of
@@ -550,12 +1564,12 @@ Changes and Deprecations: - Deprecated :class:`~pymongo.database.SystemJS`. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.get_default_database`. Applications should use - :meth:`~pymongo.mongo_client.MongoClient.get_database` without the `name` + :meth:`~pymongo.mongo_client.MongoClient.get_database` without the ``name`` parameter instead. -- Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true +- Deprecated the MongoClient option ``socketKeepAlive``. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? - `_ + `_ - Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`~pymongo.bulk.BulkOperationBuilder`.
Use
@@ -564,7 +1578,7 @@ Changes and Deprecations: :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead. - If a custom :class:`~bson.codec_options.CodecOptions` is passed to - :class:`RawBSONDocument`, its `document_class` must be + :class:`RawBSONDocument`, its ``document_class`` must be :class:`RawBSONDocument`. - :meth:`~pymongo.collection.Collection.list_indexes` no longer raises OperationFailure when the collection (or database) does not exist on
@@ -709,7 +1723,7 @@ Changes in Version 3.2.2 ------------------------ Version 3.2.2 fixes a few issues reported since the release of 3.2.1, including -a fix for using the `connect` option in the MongoDB URI and support for setting +a fix for using the ``connect`` option in the MongoDB URI and support for setting the batch size for a query to 1 when using MongoDB 3.2+. Issues Resolved
@@ -753,7 +1767,7 @@ Highlights include: :meth:`~pymongo.collection.Collection.find_one_and_replace`, :meth:`~pymongo.collection.Collection.find_one_and_update`, and :meth:`~pymongo.collection.Collection.find_one_and_delete`. - - Support for the new `bypassDocumentValidation` option in write + - Support for the new ``bypassDocumentValidation`` option in write helpers. - Support for reading and writing raw BSON with
@@ -797,10 +1811,10 @@ Highlights include: - Command monitoring support. See :mod:`~pymongo.monitoring` for details. - Configurable error handling for :exc:`UnicodeDecodeError`. See the - `unicode_decode_error_handler` option of + ``unicode_decode_error_handler`` option of :class:`~bson.codec_options.CodecOptions`. - Optional automatic timezone conversion when decoding BSON datetime. See the - `tzinfo` option of :class:`~bson.codec_options.CodecOptions`. + ``tzinfo`` option of :class:`~bson.codec_options.CodecOptions`. - An implementation of :class:`~gridfs.GridFSBucket` from the new GridFS spec. - Compliance with the new Connection String spec. - Reduced idle CPU usage in Python 2.
@@ -923,7 +1937,7 @@ applied to documents returned by the new methods SSL/TLS changes ............... -When `ssl` is ``True`` the `ssl_cert_reqs` option now defaults to +When ``ssl`` is ``True`` the ``ssl_cert_reqs`` option now defaults to :attr:`ssl.CERT_REQUIRED` if not provided. PyMongo will attempt to load OS provided CA certificates to verify the server, raising :exc:`~pymongo.errors.ConfigurationError` if it cannot.
@@ -1070,12 +2084,12 @@ Cursor management changes :meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager` are no longer deprecated. If you subclass :class:`~pymongo.cursor_manager.CursorManager` your implementation of :meth:`~pymongo.cursor_manager.CursorManager.close` -must now take a second parameter, `address`. The ``BatchCursorManager`` class +must now take a second parameter, ``address``. The ``BatchCursorManager`` class is removed. The second parameter to :meth:`~pymongo.mongo_client.MongoClient.close_cursor` is renamed from ``_conn_id`` to ``address``. -:meth:`~pymongo.mongo_client.MongoClient.kill_cursors` now accepts an `address` +:meth:`~pymongo.mongo_client.MongoClient.kill_cursors` now accepts an ``address`` parameter. :class:`~pymongo.database.Database` changes
@@ -1110,13 +2124,13 @@ The following methods have been added: The following methods have been changed: -- :meth:`~pymongo.database.Database.command`.
Support for ``as_class``, + ``uuid_subtype``, ``tag_sets``, and ``secondary_acceptable_latency_ms`` have been removed. You can instead pass an instance of - :class:`~bson.codec_options.CodecOptions` as `codec_options` and an instance + :class:`~bson.codec_options.CodecOptions` as ``codec_options`` and an instance of a read preference class from :mod:`~pymongo.read_preferences` as - `read_preference`. The `fields` and `compile_re` options are also removed. - The `fields` option was undocumented and never really worked. Regular + ``read_preference``. The ``fields`` and ``compile_re`` options are also removed. + The ``fields`` option was undocumented and never really worked. Regular expressions are always decoded to :class:`~bson.regex.Regex`. The following methods have been deprecated:
@@ -1180,9 +2194,9 @@ The following methods have changed: - :meth:`~pymongo.collection.Collection.distinct` now optionally takes a filter argument. - :meth:`~pymongo.collection.Collection.create_index` no longer caches - indexes, therefore the `cache_for` parameter has been removed. It also - no longer supports the `bucket_size` and `drop_dups` aliases for `bucketSize` - and `dropDups`. + indexes, therefore the ``cache_for`` parameter has been removed. It also + no longer supports the ``bucket_size`` and ``drop_dups`` aliases for ``bucketSize`` + and ``dropDups``. The following methods are deprecated:
@@ -1230,13 +2244,13 @@ The following find/find_one options have been removed: - tag_sets (use one of the read preference classes from :mod:`~pymongo.read_preferences` and :meth:`~pymongo.collection.Collection.with_options` instead) -- secondary_acceptable_latency_ms (use the `localThresholdMS` URI option +- secondary_acceptable_latency_ms (use the ``localThresholdMS`` URI option instead) -- max_scan (use the new `modifiers` option instead) -- snapshot (use the new `modifiers` option instead) -- tailable (use the new `cursor_type` option instead) -- await_data (use the new `cursor_type` option instead) -- exhaust (use the new `cursor_type` option instead) +- max_scan (use the new ``modifiers`` option instead) +- snapshot (use the new ``modifiers`` option instead) +- tailable (use the new ``cursor_type`` option instead) +- await_data (use the new ``cursor_type`` option instead) +- exhaust (use the new ``cursor_type`` option instead) - as_class (use :meth:`~pymongo.collection.Collection.with_options` with :class:`~bson.codec_options.CodecOptions` instead) - compile_re (BSON regular expressions are always decoded to
@@ -1249,9 +2263,9 @@ The following find/find_one options are deprecated: The following renames need special handling. - timeout -> no_cursor_timeout - The default for ``timeout`` was True. The default for ``no_cursor_timeout`` is False. If you were previously passing False for ``timeout`` you must pass **True** for ``no_cursor_timeout`` to keep the previous behavior. :mod:`~pymongo.errors` changes ..............................
@@ -1268,7 +2282,7 @@ The unsupported methods, the class, and the exception are all deleted. :mod:`~bson` changes .................... -The `compile_re` option is removed from all methods +The ``compile_re`` option is removed from all methods that accepted it in :mod:`~bson` and :mod:`~bson.json_util`.
Additionally, it is removed from :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`,
@@ -1286,7 +2300,7 @@ allows BSON int64 to be round tripped without losing type information in python 3. Note that if you store a python long (or a python int larger than 4 bytes) it will be returned from PyMongo as :class:`~bson.int64.Int64`. -The `as_class`, `tz_aware`, and `uuid_subtype` options are removed from all +The ``as_class``, ``tz_aware``, and ``uuid_subtype`` options are removed from all BSON encoding and decoding methods. Use :class:`~bson.codec_options.CodecOptions` to configure these options. The APIs affected are:
@@ -1378,7 +2392,7 @@ improves an error message when decoding BSON as well as fixes a couple other issues including :meth:`~pymongo.collection.Collection.aggregate` ignoring :attr:`~pymongo.collection.Collection.codec_options` and :meth:`~pymongo.database.Database.command` raising a superfluous -`DeprecationWarning`. +``DeprecationWarning``. Issues Resolved ...............
@@ -1408,7 +2422,9 @@ Changes in Version 2.9 Version 2.9 provides an upgrade path to PyMongo 3.x. Most of the API changes from PyMongo 3.0 have been backported in a backward compatible way, allowing applications to be written against PyMongo >= 2.9, rather than PyMongo 2.x or -PyMongo 3.x. See the :doc:`/migrate-to-pymongo3` for detailed examples. +PyMongo 3.x. See the `PyMongo 3 Migration Guide +`_ for +detailed examples. .. note:: There are a number of new deprecations in this release for features that were removed in PyMongo 3.0.
@@ -1667,7 +2683,7 @@ Important new features: - The ``max_pool_size`` option for :class:`~pymongo.mongo_client.MongoClient` and :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` now actually caps the number of sockets the pool will open concurrently. - Once the pool has reached :attr:`~pymongo.mongo_client.MongoClient.max_pool_size` + Once the pool reaches ``max_pool_size`` operations will block waiting for a socket to become available. If ``waitQueueTimeoutMS`` is set, an operation that blocks waiting for a socket will raise :exc:`~pymongo.errors.ConnectionFailure` after the timeout. By
@@ -1684,7 +2700,7 @@ Important new features: - Support aggregation output as a :class:`~pymongo.cursor.Cursor`. See :meth:`~pymongo.collection.Collection.aggregate` for details. -.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, `max_pool_size` +.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, ``max_pool_size`` would limit only the idle sockets the pool would hold onto, not the number of open sockets. The default has also changed, from 10 to 100. If you pass a value for ``max_pool_size`` make sure it is large enough for
@@ -1809,7 +2825,7 @@ Important new features: - :class:`~pymongo.cursor.Cursor` can be copied with functions from the :mod:`copy` module. - The :meth:`~pymongo.database.Database.set_profiling_level` method now supports - a `slow_ms` option. + a ``slow_ms`` option. - The replica set monitor task (used by :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` and :class:`~pymongo.replica_set_connection.ReplicaSetConnection`) is a daemon thread
@@ -1849,7 +2865,7 @@ Important New Features: - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter.
@@ -1857,7 +2873,7 @@ Important New Features: Starting with MongoDB 2.2 the getLastError command requires authentication when the server's `authentication features - `_ are enabled. + `_ are enabled. Changes to PyMongo were required to support this behavior change. Users of authentication must upgrade to PyMongo 2.3 (or newer) for "safe" write operations to function correctly. @@ -2029,16 +3045,16 @@ Important New Features: independently at the connection, database, collection or query level. Each level will inherit settings from the previous level and each level can override the previous level's setting. -- PyMongo now supports the `await_data` and `partial` cursor flags. If the - `await_data` flag is set on a `tailable` cursor the server will block for - some extra time waiting for more data to return. The `partial` flag tells +- PyMongo now supports the ``await_data`` and ``partial`` cursor flags. If the + ``await_data`` flag is set on a ``tailable`` cursor the server will block for + some extra time waiting for more data to return. The ``partial`` flag tells a mongos to return partial data for a query if not all shards are available. -- :meth:`~pymongo.collection.Collection.map_reduce` will accept a `dict` or - instance of :class:`~bson.son.SON` as the `out` parameter. +- :meth:`~pymongo.collection.Collection.map_reduce` will accept a ``dict`` or + instance of :class:`~bson.son.SON` as the ``out`` parameter. - The URI parser has been moved into its own module and can be used directly by application code. - AutoReconnect exception now provides information about the error that - actually occured instead of a generic failure message. + actually occurred instead of a generic failure message. - A number of new helper methods have been added with options for setting and unsetting cursor flags, re-indexing a collection, fsync and locking a server, and getting the server's current operations. @@ -2046,9 +3062,9 @@ Important New Features: API changes: - If only one host:port pair is specified :class:`~pymongo.connection.Connection` - will make a direct connection to only that host. Please note that `slave_okay` - must be `True` in order to query from a secondary. -- If more than one host:port pair is specified or the `replicaset` option is + will make a direct connection to only that host. Please note that ``slave_okay`` + must be ``True`` in order to query from a secondary. +- If more than one host:port pair is specified or the ``replicaset`` option is used PyMongo will treat the specified host:port pair(s) as a seed list and connect using replica set behavior. @@ -2073,7 +3089,7 @@ Version 1.11 adds a few new features and fixes a few more bugs. New Features: - Basic IPv6 support: pymongo prefers IPv4 but will try IPv6. You can - also specify an IPv6 address literal in the `host` parameter or a + also specify an IPv6 address literal in the ``host`` parameter or a MongoDB URI provided it is enclosed in '[' and ']'. - max_pool_size option: previously pymongo had a hard coded pool size of 10 connections. With this change you can specify a different pool @@ -2091,10 +3107,10 @@ API changes: - :meth:`~pymongo.database.Database.validate_collection` now returns a dict instead of a string. This change was required to deal with an API change on the server. This method also now takes the optional - `scandata` and `full` parameters. See the documentation for more + ``scandata`` and ``full`` parameters. See the documentation for more details. -.. 
warning:: The `pool_size`, `auto_start_request`, and `timeout` parameters +.. warning:: The ``pool_size``, ``auto_start_request``, and ``timeout`` parameters for :class:`~pymongo.connection.Connection` have been completely removed in this release. They were deprecated in pymongo-1.4 and have had no effect since then. Please make sure that your code
@@ -2136,9 +3152,9 @@ There are two behavior changes to be aware of: Previously the read would be sent to one randomly chosen slave and :class:`~pymongo.errors.AutoReconnect` was immediately raised in case of a connection failure. -- A Python `long` is now always BSON encoded as an int64. Previously the - encoding was based only on the value of the field and a `long` with a - value less than `2147483648` or greater than `-2147483649` would always +- A Python ``long`` is now always BSON encoded as an int64. Previously the + encoding was based only on the value of the field and a ``long`` with a + value less than ``2147483648`` or greater than ``-2147483649`` would always be BSON encoded as an int32. Issues resolved
@@ -2163,7 +3179,7 @@ Issues resolved - `PYTHON-186 `_: When storing integers, type is selected according to value instead of type - `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone + as_class option is not propagated by Cursor.clone - `PYTHON-113 `_: Redunducy in MasterSlaveConnection
@@ -2183,7 +3199,7 @@ server for the maximum BSON document size it supports. collections for map/reduce results. An output collection name must be provided and the output will replace any existing output collection with the same name. :meth:`~pymongo.collection.Collection.map_reduce` now - requires the `out` parameter. + requires the ``out`` parameter. Issues resolved ...............
@@ -2206,7 +3222,7 @@ Issues resolved - PYTHON-169: Support deepcopy of DBRef. - PYTHON-167: Duplicate of PYTHON-166. - PYTHON-166: Fixes a concurrency issue. -- PYTHON-158: Add code and err string to `db assertion` messages. +- PYTHON-158: Add code and err string to ``db assertion`` messages. Changes in Version 1.9 ----------------------
@@ -2283,7 +3299,7 @@ rather than :class:`pymongo.errors.PyMongoError`. :class:`~pymongo.connection.Connection` has been idle for a while. - added :meth:`~pymongo.database.SystemJS.list` to :class:`~pymongo.database.SystemJS`. -- added `file_document` argument to :meth:`~gridfs.grid_file.GridOut` +- added ``file_document`` argument to :meth:`~gridfs.grid_file.GridOut` to allow initializing from an existing file document. - raise :class:`~pymongo.errors.TimeoutError` even if the ``getLastError`` command was run manually and not through "safe"
@@ -2303,13 +3319,13 @@ Changes in Version 1.8 ---------------------- Version 1.8 adds support for connecting to replica sets, specifying -per-operation values for `w` and `wtimeout`, and decoding to +per-operation values for ``w`` and ``wtimeout``, and decoding to timezone-aware datetimes. - fixed a reference leak in the C extension when decoding a :class:`~bson.dbref.DBRef`. -- added support for `w`, `wtimeout`, and `fsync` (and any other - options for `getLastError`) to "safe mode" operations. +- added support for ``w``, ``wtimeout``, and ``fsync`` (and any other + options for ``getLastError``) to "safe mode" operations. - added :attr:`~pymongo.connection.Connection.nodes` property. - added a maximum pool size of 10 sockets. - added support for replica sets.
@@ -2327,9 +3343,9 @@ timezone-aware datetimes.
:class:`~bson.max_key.MaxKey` and :class:`~bson.timestamp.Timestamp` to :mod:`~bson.json_util`. - added support for decoding datetimes as aware (UTC) - it is highly - recommended to enable this by setting the `tz_aware` parameter to + recommended to enable this by setting the ``tz_aware`` parameter to :meth:`~pymongo.connection.Connection` to ``True``. -- added `network_timeout` option for individual calls to +- added ``network_timeout`` option for individual calls to :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one`. - added :meth:`~gridfs.GridFS.exists` to check if a file exists in @@ -2360,27 +3376,27 @@ highlights is `here support for querying unique status and other index information. - added :attr:`~pymongo.connection.Connection.document_class`, to specify class for returned documents. -- added `as_class` argument for +- added ``as_class`` argument for :meth:`~pymongo.collection.Collection.find`, and in the BSON decoder. - added support for creating :class:`~bson.timestamp.Timestamp` instances using a :class:`~datetime.datetime`. -- allow `dropTarget` argument for +- allow ``dropTarget`` argument for :class:`~pymongo.collection.Collection.rename`. - handle aware :class:`~datetime.datetime` instances, by converting to UTC. - added support for :class:`~pymongo.cursor.Cursor.max_scan`. - raise :class:`~gridfs.errors.FileExists` exception when creating a duplicate GridFS file. -- use `y2038 `_ for time handling in +- use `y2038 `_ for time handling in the C extension - eliminates 2038 problems when extension is installed. -- added `sort` parameter to +- added ``sort`` parameter to :meth:`~pymongo.collection.Collection.find` - finalized deprecation of changes from versions **<= 1.4** - take any non-:class:`dict` as an ``"_id"`` query for :meth:`~pymongo.collection.Collection.find_one` or :meth:`~pymongo.collection.Collection.remove` -- added ability to pass a :class:`dict` for `fields` argument to +- added ability to pass a :class:`dict` for ``fields`` argument to :meth:`~pymongo.collection.Collection.find` (supports ``"$slice"`` and field negation) - simplified code to find master, since paired setups don't always have @@ -2422,7 +3438,7 @@ Changes in Version 1.5.1 - added :data:`~gridfs.grid_file.GridFile._id` property for :class:`~gridfs.grid_file.GridFile` instances. - fix for making a :class:`~pymongo.connection.Connection` (with - `slave_okay` set) directly to a slave in a replica pair. + ``slave_okay`` set) directly to a slave in a replica pair. - accept kwargs for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index` to support all @@ -2434,7 +3450,7 @@ Changes in Version 1.5.1 Changes in Version 1.5 ---------------------- - added subtype constants to :mod:`~bson.binary` module. -- DEPRECATED `options` argument to +- DEPRECATED ``options`` argument to :meth:`~pymongo.collection.Collection` and :meth:`~pymongo.database.Database.create_collection` in favor of kwargs. @@ -2444,7 +3460,7 @@ Changes in Version 1.5 might have more data to return (useful for tailable cursors). - added :class:`~bson.timestamp.Timestamp` to better support dealing with internal MongoDB timestamps. -- added `name` argument for +- added ``name`` argument for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index`. - fixed connection pooling w/ fork @@ -2496,7 +3512,7 @@ Other changes: for example. 
- added :class:`~pymongo.errors.DuplicateKeyError` for calls to :meth:`~pymongo.collection.Collection.insert` or - :meth:`~pymongo.collection.Collection.update` with `safe` set to + :meth:`~pymongo.collection.Collection.update` with ``safe`` set to ``True``. - removed :mod:`~pymongo.thread_util`. - added :meth:`~pymongo.database.Database.add_user` and @@ -2509,7 +3525,7 @@ Other changes: is raised. - simplification of connection pooling - makes driver ~2x faster for simple benchmarks. see :ref:`connection-pooling` for more information. -- DEPRECATED `pool_size`, `auto_start_request` and `timeout` +- DEPRECATED ``pool_size``, ``auto_start_request`` and ``timeout`` parameters to :class:`~pymongo.connection.Connection`. DEPRECATED :meth:`~pymongo.connection.Connection.start_request`. - use :meth:`socket.sendall`. @@ -2520,7 +3536,7 @@ Other changes: - deprecate :meth:`~pymongo.database.Database._command` in favor of :meth:`~pymongo.database.Database.command`. - send all commands without wrapping as ``{"query": ...}``. -- support string as `key` argument to +- support string as ``key`` argument to :meth:`~pymongo.collection.Collection.group` (keyf) and run all groups as commands. - support for equality testing for :class:`~bson.code.Code` @@ -2568,7 +3584,7 @@ Changes in Version 1.2.1 Changes in Version 1.2 ---------------------- -- `spec` parameter for :meth:`~pymongo.collection.Collection.remove` is +- ``spec`` parameter for :meth:`~pymongo.collection.Collection.remove` is now optional to allow for deleting all documents in a :class:`~pymongo.collection.Collection` - always wrap queries with ``{query: ...}`` even when no special options - @@ -2602,15 +3618,15 @@ Changes in Version 1.1.2 Changes in Version 1.1.1 ------------------------ -- added `multi` parameter for +- added ``multi`` parameter for :meth:`~pymongo.collection.Collection.update` - fix unicode regex patterns with C extension - added :meth:`~pymongo.collection.Collection.distinct` -- added `database` support for :class:`~bson.dbref.DBRef` +- added ``database`` support for :class:`~bson.dbref.DBRef` - added :mod:`~bson.json_util` with helpers for encoding / decoding special types to JSON - DEPRECATED :meth:`pymongo.cursor.Cursor.__len__` in favor of - :meth:`~pymongo.cursor.Cursor.count` with `with_limit_and_skip` set + :meth:`~pymongo.cursor.Cursor.count` with ``with_limit_and_skip`` set to ``True`` due to performance regression - switch documentation to Sphinx @@ -2623,18 +3639,18 @@ Changes in Version 1.1 - fix :class:`~bson.objectid.ObjectId` generation when using :mod:`multiprocessing` - added :attr:`~pymongo.cursor.Cursor.collection` -- added `network_timeout` parameter for +- added ``network_timeout`` parameter for :meth:`~pymongo.connection.Connection` -- DEPRECATED `slave_okay` parameter for individual queries -- fix for `safe` mode when multi-threaded -- added `safe` parameter for :meth:`~pymongo.collection.Collection.remove` -- added `tailable` parameter for :meth:`~pymongo.collection.Collection.find` +- DEPRECATED ``slave_okay`` parameter for individual queries +- fix for ``safe`` mode when multi-threaded +- added ``safe`` parameter for :meth:`~pymongo.collection.Collection.remove` +- added ``tailable`` parameter for :meth:`~pymongo.collection.Collection.find` Changes in Version 1.0 ---------------------- - fixes for :class:`~pymongo.master_slave_connection.MasterSlaveConnection` -- added `finalize` parameter for :meth:`~pymongo.collection.Collection.group` +- added ``finalize`` parameter for 
:meth:`~pymongo.collection.Collection.group` - improvements to :meth:`~pymongo.collection.Collection.insert` speed - improvements to :mod:`gridfs` speed - added :meth:`~pymongo.cursor.Cursor.__getitem__` and @@ -2664,9 +3680,9 @@ Changes in Version 0.15 ----------------------- - fix string representation of :class:`~bson.objectid.ObjectId` instances -- added `timeout` parameter for +- added ``timeout`` parameter for :meth:`~pymongo.collection.Collection.find` -- allow scope for `reduce` function in +- allow scope for ``reduce`` function in :meth:`~pymongo.collection.Collection.group` Changes in Version 0.14.2 ------------------------- @@ -2683,7 +3699,7 @@ Changes in Version 0.14 ----------------------- - support for long in :class:`~bson.BSON` - added :meth:`~pymongo.collection.Collection.rename` -- added `snapshot` parameter for +- added ``snapshot`` parameter for :meth:`~pymongo.collection.Collection.find` Changes in Version 0.13 @@ -2725,7 +3741,7 @@ Changes in Version 0.11 - better build failure detection - driver support for selecting fields in sub-documents - disallow insertion of invalid key names -- added `timeout` parameter for :meth:`~pymongo.connection.Connection` +- added ``timeout`` parameter for :meth:`~pymongo.connection.Connection` Changes in Version 0.10.3 ------------------------- @@ -2756,9 +3772,3 @@ Changes in Version 0.9.7 :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` - add ``--no_ext`` command line option to *setup.py* - -.. toctree:: - :hidden: - - python3 - examples/gevent diff --git a/doc/common-issues.rst b/doc/common-issues.rst new file mode 100644 index 0000000000..3d2d06a5a7 --- /dev/null +++ b/doc/common-issues.rst @@ -0,0 +1,96 @@ +Frequently Encountered Issues +============================= + +Also see the :ref:`TLSErrors` section. + +Server reports wire version X, PyMongo requires Y +------------------------------------------------- + +When attempting to connect to a MongoDB <=3.4 server, PyMongo will throw the following error:: + + >>> client.admin.command('ping') + ... + pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 5, but this version of PyMongo requires at least 6 (MongoDB 3.6). + +This is caused by the driver being too new for the server it is being run against. +To resolve this issue, either upgrade your database to version >= 3.6 or downgrade to PyMongo 3.x, which supports MongoDB >= 2.6. + + +'Cursor' object has no attribute '_Cursor__killed' +-------------------------------------------------- + +On versions of PyMongo <3.9, supplying invalid arguments to the constructor of Cursor +raises a TypeError and also prints an AttributeError to ``stderr``. The AttributeError is not relevant; +instead, look at the TypeError for debugging information:: + + >>> coll.find(wrong=1) + Exception ignored in: + ... + AttributeError: 'Cursor' object has no attribute '_Cursor__killed' + ... + TypeError: __init__() got an unexpected keyword argument 'wrong' + +To fix this, make sure that you are supplying the correct keyword arguments. +You can also upgrade to PyMongo >=3.9, which removes the spurious error. + + +MongoClient fails with ConfigurationError +----------------------------------------- + +This is a common issue stemming from using incorrect keyword argument names:: + + >>> client = MongoClient(wrong=1) + ... + pymongo.errors.ConfigurationError: Unknown option wrong + +To fix this, check your spelling and make sure that the keyword argument you are specifying exists.
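+If you want such misconfigurations to fail fast in your own application, one
+minimal sketch (the connection string and pool size below are illustrative;
+``MongoClient`` and ``ConfigurationError`` are the real PyMongo names) is to
+construct the client inside a ``try`` block::
+
+    from pymongo import MongoClient
+    from pymongo.errors import ConfigurationError
+
+    try:
+        # Unknown keyword arguments are rejected in the constructor itself,
+        # before any connection attempt is made.
+        client = MongoClient("mongodb://localhost:27017", maxPoolSize=50)
+    except ConfigurationError as exc:
+        raise SystemExit(f"Invalid MongoClient option: {exc}")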
+ + +DeprecationWarning: count is deprecated +--------------------------------------- + +PyMongo no longer supports :meth:`pymongo.cursor.Cursor.count`. +Instead, use :meth:`pymongo.collection.Collection.count_documents`:: + + >>> client = MongoClient() + >>> d = datetime.datetime(2009, 11, 12, 12) + >>> list(client.db.coll.find({"date": {"$lt": d}}, limit=2)) + [{'_id': ObjectId('6247b058cebb8b179b7039f8'), 'date': datetime.datetime(1, 1, 1, 0, 0)}, {'_id': ObjectId('6247b059cebb8b179b7039f9'), 'date': datetime.datetime(1, 1, 1, 0, 0)}] + >>> client.db.coll.count_documents({"date": {"$lt": d}}, limit=2) + 2 + +Note that this is NOT the same as ``Cursor.count_documents`` (which does not exist); +``count_documents`` is a method of the Collection class, so you must call it on a collection object +or you will receive the following error:: + + >>> Cursor(MongoClient().db.coll).count() + Traceback (most recent call last): + File "", line 1, in + AttributeError: 'Cursor' object has no attribute 'count' + >>> + +Timeout when accessing MongoDB from PyMongo with tunneling +---------------------------------------------------------- + +When attempting to connect to a replica set MongoDB instance over an SSH tunnel, you +will receive the following error:: + + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1560, in count + return self._count(cmd, collation, session) + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1504, in _count + with self._socket_for_reads() as (connection, slave_ok): + File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ + return self.gen.next() + File "/Library/Python/2.7/site-packages/pymongo/mongo_client.py", line 982, in _socket_for_reads + server = topology.select_server(read_preference) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 224, in select_server + address)) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 183, in select_servers + selector, server_timeout, address) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 199, in _select_servers_loop + self._error_message(selector)) + pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out + +This is because PyMongo discovers replica set members using the response from the isMaster command, which +contains the addresses and ports of the other members. However, these addresses and ports will not be accessible through the SSH tunnel, so this behavior is unsupported. +You can, however, connect directly to a single MongoDB node using the ``directConnection=True`` option with SSH tunneling. diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst index 08c5d57937..834f86ce54 100644 --- a/doc/compatibility-policy.rst +++ b/doc/compatibility-policy.rst @@ -18,13 +18,13 @@ effort to release at least one minor version that *deprecates* it. We add `DeprecationWarning`_. You can ensure your code is future-proof by running your code with the latest PyMongo release and looking for DeprecationWarnings. -Starting with Python 2.7, the interpreter silences DeprecationWarnings by -default. For example, the following code uses the deprecated ``insert`` -method but does not raise any warning: +The interpreter silences DeprecationWarnings by default. For example, the +following code uses the deprecated ``insert`` method but does not raise any +warning: ..
code-block:: python - # "insert.py" + # "insert.py" (with PyMongo 3.X) from pymongo import MongoClient client = MongoClient() @@ -32,13 +32,13 @@ method but does not raise any warning: To print deprecation warnings to stderr, run python with "-Wd":: - $ python -Wd insert.py + $ python3 -Wd insert.py insert.py:4: DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. client.test.test.insert({}) You can turn warnings into exceptions with "python -We":: - $ python -We insert.py + $ python3 -We insert.py Traceback (most recent call last): File "insert.py", line 4, in client.test.test.insert({}) @@ -55,8 +55,8 @@ deprecated PyMongo features. .. _semantic versioning: http://semver.org/ .. _DeprecationWarning: - https://docs.python.org/2/library/exceptions.html#exceptions.DeprecationWarning + https://docs.python.org/3/library/exceptions.html#DeprecationWarning -.. _the warnings module: https://docs.python.org/2/library/warnings.html +.. _the warnings module: https://docs.python.org/3/library/warnings.html -.. _the -W command line option: https://docs.python.org/2/using/cmdline.html#cmdoption-W +.. _the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-W diff --git a/doc/conf.py b/doc/conf.py index ad4c42b9e4..1ea51add88 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,34 +1,49 @@ -# -*- coding: utf-8 -*- # # PyMongo documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. +from __future__ import annotations -import sys, os -sys.path[0:0] = [os.path.abspath('..')] +import sys +from pathlib import Path -import pymongo +sys.path[0:0] = [Path("..").resolve()] + +import pymongo # noqa: E402 # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', - 'sphinx.ext.todo', 'doc.mongo_extensions', - 'sphinx.ext.intersphinx'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx.ext.todo", + "sphinx.ext.intersphinx", +] + + +# Add optional extensions +try: + import sphinxcontrib.shellcheck # noqa: F401 + + extensions += ["sphinxcontrib.shellcheck"] +except ImportError: + pass # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'PyMongo' -copyright = u'MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc' +project = "PyMongo" +copyright = "MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc" html_show_sphinx = False # The version info for the project you're documenting, acts as replacement for @@ -45,31 +60,43 @@ # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
-#show_authors = False +# show_authors = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] + +# Options for link checking +# The anchors on the rendered markdown page are created after the fact, +# so those link results in a 404. +# wiki.centos.org has been flakey. +# sourceforge.net is giving a 403 error, but is still accessible from the browser. +linkcheck_ignore = [ + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", + r"https://wiki.centos.org/[\w/]*", + r"http://sourceforge.net/", +] # -- Options for extensions ---------------------------------------------------- -autoclass_content = 'init' +autoclass_content = "init" -doctest_path = [os.path.abspath('..')] +doctest_path = [Path("..").resolve()] -doctest_test_doctest_blocks = '' +doctest_test_doctest_blocks = "" doctest_global_setup = """ from pymongo.mongo_client import MongoClient @@ -80,94 +107,95 @@ # -- Options for HTML output --------------------------------------------------- -# Theme gratefully vendored from CPython source. -html_theme = "pydoctheme" -html_theme_path = ["."] -html_theme_options = { - 'collapsiblesidebar': True, - 'googletag': False -} +try: + import furo # noqa: F401 + + html_theme = "furo" +except ImportError: + # Theme gratefully vendored from CPython source. + html_theme = "pydoctheme" + html_theme_path = ["."] + html_theme_options = {"collapsiblesidebar": True, "googletag": False} -# Additional static files. -html_static_path = ['static'] + # Additional static files. + html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
-#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyMongo' + release.replace('.', '_') +htmlhelp_basename = "PyMongo" + release.replace(".", "_") # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'PyMongo.tex', u'PyMongo Documentation', - u'Michael Dirolf', 'manual'), + ("index", "PyMongo.tex", "PyMongo Documentation", "Michael Dirolf", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True intersphinx_mapping = { - 'gevent': ('http://www.gevent.org/', None), - 'py': ('https://docs.python.org/3/', None), + "gevent": ("https://www.gevent.org/", None), + "py": ("https://docs.python.org/3/", None), } diff --git a/doc/contributors.rst b/doc/contributors.rst index 9773b38224..2a4ca1ea47 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -87,3 +87,14 @@ The following is a list of people who have contributed to - Felipe Rodrigues(fbidu) - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) +- Julius Park (juliusgeo) +- Khanh Nguyen (KN99HN) +- Henri Froese (henrifroese) +- Ishmum Jawad Khan (ishmum123) +- Arie Bovenberg (ariebovenberg) +- Ben Warner (bcwarner) +- Jean-Christophe Fillion-Robin (jcfr) +- Sean Cheah (thalassemia) +- Dainis Gorbunovs (DainisGorbunovs) +- Iris Ho (sleepyStick) +- Stephan Hof (stephan-hof) diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst index 6327cfd835..effe18efca 100644 --- a/doc/developer/periodic_executor.rst +++ b/doc/developer/periodic_executor.rst @@ -5,7 +5,7 @@ Periodic Executors PyMongo implements a :class:`~periodic_executor.PeriodicExecutor` for two purposes: as the background thread for :class:`~monitor.Monitor`, and to -regularly check if there are `OP_KILL_CURSORS` messages that must be sent to the server. +regularly check if there are ``OP_KILL_CURSORS`` messages that must be sent to the server. Killing Cursors --------------- @@ -17,7 +17,7 @@ the cursor before finishing iteration:: for doc in collection.find(): raise Exception() -We try to send an `OP_KILL_CURSORS` to the server to tell it to clean up the +We try to send an ``OP_KILL_CURSORS`` to the server to tell it to clean up the server-side cursor. But we must not take any locks directly from the cursor's destructor (see `PYTHON-799`_), so we cannot safely use the PyMongo data structures required to send a message. 
The solution is to add the cursor's id @@ -26,7 +26,7 @@ to an array on the :class:`~mongo_client.MongoClient` without taking any locks. Each client has a :class:`~periodic_executor.PeriodicExecutor` devoted to checking the array for cursor ids. Any it sees are the result of cursors that were freed while the server-side cursor was still open. The executor can safely -take the locks it needs in order to send the `OP_KILL_CURSORS` message. +take the locks it needs in order to send the ``OP_KILL_CURSORS`` message. .. _PYTHON-799: https://jira.mongodb.org/browse/PYTHON-799 @@ -103,10 +103,10 @@ the exponential backoff is restarted frequently. Overall, the condition variable is not waking a few times a second, but hundreds of times. (See `PYTHON-983`_.) Thus the current design of periodic executors is surprisingly simple: they -do a simple `time.sleep` for a half-second, check if it is time to wake or +do a simple ``time.sleep`` for a half-second, check if it is time to wake or terminate, and sleep again. -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#requesting-an-immediate-check +.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check .. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt new file mode 100644 index 0000000000..d8fd12b54d --- /dev/null +++ b/doc/docs-requirements.txt @@ -0,0 +1,5 @@ +sphinx>=5.3,<7 +sphinx_rtd_theme~=0.5 +readthedocs-sphinx-search~=0.1 +sphinxcontrib-shellcheck~=1.1 +furo==2022.12.7 diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index fd58479b35..22e19e9842 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -8,8 +8,9 @@ group method. .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('aggregation_example') + client.drop_database("aggregation_example") Setup ----- @@ -20,10 +21,14 @@ aggregations on: >>> from pymongo import MongoClient >>> db = MongoClient().aggregation_example - >>> result = db.things.insert_many([{"x": 1, "tags": ["dog", "cat"]}, - ... {"x": 2, "tags": ["cat"]}, - ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, - ... {"x": 3, "tags": []}]) + >>> result = db.things.insert_many( + ... [ + ... {"x": 1, "tags": ["dog", "cat"]}, + ... {"x": 2, "tags": ["cat"]}, + ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, + ... {"x": 3, "tags": []}, + ... ] + ... ) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -54,19 +59,27 @@ eg "$sort": >>> pipeline = [ ... {"$unwind": "$tags"}, ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - ... {"$sort": SON([("count", -1), ("_id", -1)])} + ... {"$sort": SON([("count", -1), ("_id", -1)])}, ... ] >>> import pprint >>> pprint.pprint(list(db.things.aggregate(pipeline))) - [{u'_id': u'cat', u'count': 3}, - {u'_id': u'dog', u'count': 2}, - {u'_id': u'mouse', u'count': 1}] + [{'_id': 'cat', 'count': 3}, + {'_id': 'dog', 'count': 2}, + {'_id': 'mouse', 'count': 1}] + +To run an explain plan for this aggregation use +`PyMongoExplain `_, +a companion library for PyMongo. 
It allows you to explain any CRUD operation +by providing a few convenience classes:: + + >>> from pymongoexplain import ExplainableCollection + >>> ExplainableCollection(collection).aggregate(pipeline) + {'ok': 1.0, 'queryPlanner': [...]} -To run an explain plan for this aggregation use the -:meth:`~pymongo.database.Database.command` method:: +Or, use the :meth:`~pymongo.database.Database.command` method:: >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) - {u'ok': 1.0, u'stages': [...]} + {'ok': 1.0, 'stages': [...]} As well as simple aggregations the aggregation framework provides projection capabilities to reshape the returned data. Using projections and aggregation, @@ -74,106 +87,4 @@ you can add computed fields, create new virtual sub-objects, and extract sub-fields into the top-level of results. .. seealso:: The full documentation for MongoDB's `aggregation framework - `_ - -Map/Reduce ----------- - -Another option for aggregation is to use the map reduce framework. Here we -will define **map** and **reduce** functions to also count the number of -occurrences for each tag in the ``tags`` array, across the entire collection. - -Our **map** function just emits a single `(key, 1)` pair for each tag in -the array: - -.. doctest:: - - >>> from bson.code import Code - >>> mapper = Code(""" - ... function () { - ... this.tags.forEach(function(z) { - ... emit(z, 1); - ... }); - ... } - ... """) - -The **reduce** function sums over all of the emitted values for a given key: - -.. doctest:: - - >>> reducer = Code(""" - ... function (key, values) { - ... var total = 0; - ... for (var i = 0; i < values.length; i++) { - ... total += values[i]; - ... } - ... return total; - ... } - ... """) - -.. note:: We can't just return ``values.length`` as the **reduce** function - might be called iteratively on the results of other reduce steps. - -Finally, we call :meth:`~pymongo.collection.Collection.map_reduce` and -iterate over the result collection: - -.. doctest:: - - >>> result = db.things.map_reduce(mapper, reducer, "myresults") - >>> for doc in result.find(): - ... pprint.pprint(doc) - ... - {u'_id': u'cat', u'value': 3.0} - {u'_id': u'dog', u'value': 2.0} - {u'_id': u'mouse', u'value': 1.0} - -Advanced Map/Reduce -------------------- - -PyMongo's API supports all of the features of MongoDB's map/reduce engine. -One interesting feature is the ability to get more detailed results when -desired, by passing `full_response=True` to -:meth:`~pymongo.collection.Collection.map_reduce`. This returns the full -response to the map/reduce command, rather than just the result collection: - -.. doctest:: - - >>> pprint.pprint( - ... db.things.map_reduce(mapper, reducer, "myresults", full_response=True)) - {...u'counts': {u'emit': 6, u'input': 4, u'output': 3, u'reduce': 2}, - u'ok': ..., - u'result': u'...', - u'timeMillis': ...} - -All of the optional map/reduce parameters are also supported, simply pass them -as keyword arguments. In this example we use the `query` parameter to limit the -documents that will be mapped over: - -.. doctest:: - - >>> results = db.things.map_reduce( - ... mapper, reducer, "myresults", query={"x": {"$lt": 2}}) - >>> for doc in results.find(): - ... pprint.pprint(doc) - ... - {u'_id': u'cat', u'value': 1.0} - {u'_id': u'dog', u'value': 1.0} - -You can use :class:`~bson.son.SON` or :class:`collections.OrderedDict` to -specify a different database to store the result collection: - -.. doctest:: - - >>> from bson.son import SON - >>> pprint.pprint( - ... 
db.things.map_reduce( - ... mapper, - ... reducer, - ... out=SON([("replace", "results"), ("db", "outdb")]), - ... full_response=True)) - {...u'counts': {u'emit': 6, u'input': 4, u'output': 3, u'reduce': 2}, - u'ok': ..., - u'result': {u'collection': ..., u'db': ...}, - u'timeMillis': ...} - -.. seealso:: The full list of options for MongoDB's `map reduce engine `_ + `_ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index dabf06957a..b3ba89026e 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -5,12 +5,13 @@ MongoDB supports several different authentication mechanisms. These examples cover all authentication methods currently supported by PyMongo, documenting Python module and MongoDB version dependencies. +.. _percent escaped: + Percent-Escaping Username and Password -------------------------------------- Username and password must be percent-escaped with -:meth:`urllib.parse.quote_plus` in Python 3, or :meth:`urllib.quote_plus` in -Python 2, to be used in a MongoDB URI. For example, in Python 3:: +:py:func:`urllib.parse.quote_plus` to be used in a MongoDB URI. For example:: >>> from pymongo import MongoClient >>> import urllib.parse @@ -96,9 +97,8 @@ the "MongoDB Challenge-Response" protocol:: Default Authentication Mechanism -------------------------------- -If no mechanism is specified, PyMongo automatically uses MONGODB-CR when -connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when connected to -MongoDB 3.0 through 3.6, and negotiates the mechanism to use (SCRAM-SHA-1 +If no mechanism is specified, PyMongo automatically uses SCRAM-SHA-1 when connected +to MongoDB 3.6, and negotiates the mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. Default Database and "authSource" @@ -124,36 +124,27 @@ MONGODB-X509 ------------ .. versionadded:: 2.6 -The MONGODB-X509 mechanism authenticates a username derived from the -distinguished subject name of the X.509 certificate presented by the driver -during SSL negotiation. This authentication method requires the use of SSL -connections with certificate validation and is available in MongoDB 2.6 -and newer:: +The MONGODB-X509 mechanism authenticates via the X.509 certificate presented +by the driver during TLS/SSL negotiation. This authentication method requires +the use of TLS/SSL connections with certificate validation:: - >>> import ssl >>> from pymongo import MongoClient >>> client = MongoClient('example.com', - ... username="" ... authMechanism="MONGODB-X509", - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') MONGODB-X509 authenticates against the $external virtual database, so you do not have to specify a database in the URI:: - >>> uri = "mongodb://@example.com/?authMechanism=MONGODB-X509" + >>> uri = "mongodb://example.com/?authMechanism=MONGODB-X509" >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') >>> -.. versionchanged:: 3.4 - When connected to MongoDB >= 3.4 the username is no longer required. - ..
_gssapi: GSSAPI (Kerberos) @@ -189,7 +180,7 @@ URI:: >>> client = MongoClient(uri) >>> -The default service name used by MongoDB and PyMongo is `mongodb`. You can +The default service name used by MongoDB and PyMongo is ``mongodb``. You can specify a custom service name with the ``authMechanismProperties`` option:: >>> from pymongo import MongoClient @@ -240,15 +231,156 @@ These examples use the $external virtual database for LDAP support:: >>> SASL PLAIN is a clear-text authentication mechanism. We **strongly** recommend -that you connect to MongoDB using SSL with certificate validation when using -the SASL PLAIN mechanism:: +that you connect to MongoDB using TLS/SSL with certificate validation when +using the SASL PLAIN mechanism:: - >>> import ssl >>> from pymongo import MongoClient >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') >>> + +.. _MONGODB-AWS: + +MONGODB-AWS +----------- +.. versionadded:: 3.11 + +The MONGODB-AWS authentication mechanism is available in MongoDB 4.4+ and +requires extra pymongo dependencies. To use it, install pymongo with the +``aws`` extra:: + + $ python -m pip install 'pymongo[aws]' + +The MONGODB-AWS mechanism authenticates using AWS IAM credentials (an access +key ID and a secret access key), `temporary AWS IAM credentials`_ obtained +from an `AWS Security Token Service (STS)`_ `Assume Role`_ request, +AWS Lambda `environment variables`_, or temporary AWS IAM credentials assigned +to an `EC2 instance`_ or ECS task. The use of temporary credentials, in +addition to an access key ID and a secret access key, also requires a +security (or session) token. + +Credentials can be configured through the MongoDB URI, environment variables, +or the local EC2 or ECS endpoint. The order in which the client searches for +`credentials`_ is the same as the one used by the AWS ``boto3`` library +when using ``pymongo_auth_aws>=1.1.0``. + +Because we are now using ``boto3`` to handle credentials, the order and +locations of credentials are slightly different from before. Particularly, +if you have a shared AWS credentials or config file, +then those credentials will be used by default if AWS auth environment +variables are not set. To override this behavior, set +``AWS_SHARED_CREDENTIALS_FILE=""`` in your shell or add +``os.environ["AWS_SHARED_CREDENTIALS_FILE"] = ""`` to your script or +application. Alternatively, you can create an AWS profile specifically for +your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. + +MONGODB-AWS authenticates against the "$external" virtual database, so none of +the URIs in this section need to include the ``authSource`` URI option. + +.. _credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + +AWS IAM credentials +~~~~~~~~~~~~~~~~~~~ + +Applications can authenticate using AWS IAM credentials by providing a valid +access key id and secret access key pair as the username and password, +respectively, in the MongoDB URI. A sample URI would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: The access_key_id and secret_access_key passed into the URI MUST + be `percent escaped`_. 
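+For example, a minimal sketch of building such a URI with the standard
+library (the key pair below is the placeholder pair from AWS's own
+documentation, not a real credential)::
+
+    >>> from urllib.parse import quote_plus
+    >>> access_key_id = "AKIAIOSFODNN7EXAMPLE"
+    >>> secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+    >>> uri = "mongodb+srv://%s:%s@example.mongodb.net/?authMechanism=MONGODB-AWS" % (
+    ...     quote_plus(access_key_id),
+    ...     quote_plus(secret_access_key),
+    ... )
+
+Here ``quote_plus`` turns the ``/`` characters in the secret key into
+``%2F``, so the resulting URI parses correctly.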
+ +AssumeRole +~~~~~~~~~~ + +Applications can authenticate using temporary credentials returned from an +assume role request. These temporary credentials consist of an access key +ID, a secret access key, and a security token passed into the URI. +A sample URI would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" + >>> client = MongoClient(uri) + +.. note:: The access_key_id, secret_access_key, and session_token passed into + the URI MUST be `percent escaped`_. + + +AWS Lambda (Environment Variables) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When the username and password are not provided and the MONGODB-AWS mechanism +is set, the client will fallback to using the `environment variables`_ +``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` +for the access key ID, secret access key, and session token, respectively:: + + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ export AWS_SESSION_TOKEN= + $ python + >>> from pymongo import MongoClient + >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will use credentials set via the environment variables. + These environment variables MUST NOT be `percent escaped`_. + + +.. _EKS Clusters: + +EKS Clusters +~~~~~~~~~~~~ + +Applications using the `Authenticating users for your cluster from an OpenID Connect identity provider `_ capability on EKS can now +use the provided credentials, by giving the associated IAM User +`sts:AssumeRoleWithWebIdentity `_ +permission. + +When the username and password are not provided, the MONGODB-AWS mechanism +is set, and ``AWS_WEB_IDENTITY_TOKEN_FILE``, ``AWS_ROLE_ARN``, and +optional ``AWS_ROLE_SESSION_NAME`` are available, the driver will use +an ``AssumeRoleWithWebIdentity`` call to retrieve temporary credentials. +The application must be using ``pymongo_auth_aws`` >= 1.1.0 for EKS support. + +ECS Container +~~~~~~~~~~~~~ + +Applications can authenticate from an ECS container via temporary +credentials assigned to the machine. A sample URI on an ECS container +would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will query the ECS container endpoint to obtain these + credentials. + +EC2 Instance +~~~~~~~~~~~~ + +Applications can authenticate from an EC2 instance via temporary +credentials assigned to the machine. A sample URI on an EC2 machine +would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will query the EC2 instance endpoint to obtain these + credentials. + +.. _temporary AWS IAM credentials: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html +.. _AWS Security Token Service (STS): https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html +.. _Assume Role: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html +.. _EC2 instance: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html +.. 
_environment variables: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 019817fbd0..3ed8e09645 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -4,8 +4,9 @@ Bulk Write Operations .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('bulk_example') + client.drop_database("bulk_example") This tutorial explains how to take advantage of PyMongo's bulk write operation features. Executing write operations in batches @@ -27,7 +28,7 @@ bulk insert operations. >>> import pymongo >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert_many([{'i': i} for i in range(10000)]).inserted_ids + >>> db.test.insert_many([{"i": i} for i in range(10000)]).inserted_ids [...] >>> db.test.count_documents({}) 10000 @@ -56,32 +57,30 @@ of operations performed. >>> from pprint import pprint >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne - >>> result = db.test.bulk_write([ - ... DeleteMany({}), # Remove all documents from the previous example. - ... InsertOne({'_id': 1}), - ... InsertOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}), - ... UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True), - ... ReplaceOne({'j': 1}, {'j': 2})]) + >>> result = db.test.bulk_write( + ... [ + ... DeleteMany({}), # Remove all documents from the previous example. + ... InsertOne({"_id": 1}), + ... InsertOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... UpdateOne({"_id": 1}, {"$set": {"foo": "bar"}}), + ... UpdateOne({"_id": 4}, {"$inc": {"j": 1}}, upsert=True), + ... ReplaceOne({"j": 1}, {"j": 2}), + ... ] + ... ) >>> pprint(result.bulk_api_result) {'nInserted': 3, 'nMatched': 2, 'nModified': 2, 'nRemoved': 10000, 'nUpserted': 1, - 'upserted': [{u'_id': 4, u'index': 5}], + 'upserted': [{'_id': 4, 'index': 5}], 'writeConcernErrors': [], 'writeErrors': []} -.. warning:: ``nModified`` is only reported by MongoDB 2.6 and later. When - connected to an earlier server version, or in certain mixed version sharding - configurations, PyMongo omits this field from the results of a bulk - write operation. - The first write failure that occurs (e.g. duplicate key error) aborts the remaining operations, and PyMongo raises -:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attibute of +:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of the exception instance provides the execution results up until the failure occurred and details about the failure - including the operation that caused the failure. @@ -92,9 +91,10 @@ the failure. >>> from pymongo import InsertOne, DeleteOne, ReplaceOne >>> from pymongo.errors import BulkWriteError >>> requests = [ - ... ReplaceOne({'j': 2}, {'i': 5}), - ... InsertOne({'_id': 4}), # Violates the unique key constraint on _id. - ... DeleteOne({'i': 5})] + ... ReplaceOne({"j": 2}, {"i": 5}), + ... InsertOne({"_id": 4}), # Violates the unique key constraint on _id. + ... DeleteOne({"i": 5}), + ... ] >>> try: ... db.test.bulk_write(requests) ... except BulkWriteError as bwe: @@ -107,10 +107,10 @@ the failure. 'nUpserted': 0, 'upserted': [], 'writeConcernErrors': [], - 'writeErrors': [{u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 1,... - u'op': {'_id': 4}}]} + 'writeErrors': [{'code': 11000, + 'errmsg': '...E11000...duplicate key error...', + 'index': 1,... + 'op': {'_id': 4}}]} .. 
_unordered_bulk: @@ -129,10 +129,11 @@ and fourth operations succeed. :options: +NORMALIZE_WHITESPACE >>> requests = [ - ... InsertOne({'_id': 1}), - ... DeleteOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... ReplaceOne({'_id': 4}, {'i': 1})] + ... InsertOne({"_id": 1}), + ... DeleteOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... ReplaceOne({"_id": 4}, {"i": 1}), + ... ] >>> try: ... db.test.bulk_write(requests, ordered=False) ... except BulkWriteError as bwe: @@ -145,14 +146,14 @@ and fourth operations succeed. 'nUpserted': 0, 'upserted': [], 'writeConcernErrors': [], - 'writeErrors': [{u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 0,... - u'op': {'_id': 1}}, - {u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 2,... - u'op': {'_id': 3}}]} + 'writeErrors': [{'code': 11000, + 'errmsg': '...E11000...duplicate key error...', + 'index': 0,... + 'op': {'_id': 1}}, + {'code': 11000, + 'errmsg': '...', + 'index': 2,... + 'op': {'_id': 3}}]} Write Concern ............. @@ -177,7 +178,7 @@ after all operations are attempted, regardless of execution order. 'nRemoved': 0, 'nUpserted': 0, 'upserted': [], - 'writeConcernErrors': [{u'code': 64... - u'errInfo': {u'wtimeout': True}, - u'errmsg': u'waiting for replication timed out'}], + 'writeConcernErrors': [{'code': 64... + 'errInfo': {'wtimeout': True}, + 'errmsg': 'waiting for replication timed out'}], 'writeErrors': []} diff --git a/doc/examples/collations.rst b/doc/examples/collations.rst index 1a5106039c..45e647d816 100644 --- a/doc/examples/collations.rst +++ b/doc/examples/collations.rst @@ -42,7 +42,7 @@ or with plain Python dictionaries. The structure is the same:: backwards=) The only required parameter is ``locale``, which the server parses as -an `ICU format locale ID `_. +an `ICU format locale ID `_. For example, set ``locale`` to ``en_US`` to represent US English or ``fr_CA`` to represent Canadian French. diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index 5cf5c66ded..76d0c97a36 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -1,8 +1,37 @@ Copying a Database ================== -To copy a database within a single mongod process, or between mongod -servers, simply connect to the target mongod and use the +MongoDB >= 4.2 +-------------- + +Starting in MongoDB version 4.2, the server removes the deprecated ``copydb`` command. +As an alternative, users can use ``mongodump`` and ``mongorestore`` (with the ``mongorestore`` +options ``--nsFrom`` and ``--nsTo``). + +For example, to copy the ``test`` database from a local instance running on the +default port 27017 to the ``examples`` database on the same instance, you can: + +#. Use ``mongodump`` to dump the test database to an archive ``mongodump-test-db``:: + + mongodump --archive="mongodump-test-db" --db=test + +#. Use ``mongorestore`` with ``--nsFrom`` and ``--nsTo`` to restore (with database name change) + from the archive:: + + mongorestore --archive="mongodump-test-db" --nsFrom='test.*' --nsTo='examples.*' + +Include additional options as necessary, such as to specify the uri or host, username, +password and authentication database. + +For more info about using ``mongodump`` and ``mongorestore`` see the `Copy a Database`_ example +in the official ``mongodump`` documentation. + +MongoDB <= 4.0 +-------------- + +When using MongoDB <= 4.0, it is possible to use the deprecated ``copydb`` command +to copy a database. 
To copy a database within a single ``mongod`` process, or +between ``mongod`` servers, connect to the target ``mongod`` and use the :meth:`~pymongo.database.Database.command` method:: >>> from pymongo import MongoClient @@ -38,4 +67,7 @@ Versions of PyMongo before 3.0 included a ``copy_database`` helper method, but it has been removed. .. _copyDatabase function in the mongo shell: - http://docs.mongodb.org/manual/reference/method/db.copyDatabase/ + http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ + +.. _Copy a Database: + https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst index 591a250b6c..acf706deba 100644 --- a/doc/examples/custom_type.rst +++ b/doc/examples/custom_type.rst @@ -19,7 +19,7 @@ We'll start by getting a clean database to use for the example: >>> from pymongo import MongoClient >>> client = MongoClient() - >>> client.drop_database('custom_type_example') + >>> client.drop_database("custom_type_example") >>> db = client.custom_type_example @@ -36,7 +36,7 @@ to save an instance of ``Decimal`` with PyMongo, results in an >>> from decimal import Decimal >>> num = Decimal("45.321") - >>> db.test.insert_one({'num': num}) + >>> db.test.insert_one({"num": num}) Traceback (most recent call last): ... bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -78,8 +78,8 @@ interested in both encoding and decoding our custom type, we use the >>> from bson.decimal128 import Decimal128 >>> from bson.codec_options import TypeCodec >>> class DecimalCodec(TypeCodec): - ... python_type = Decimal # the Python type acted upon by this type codec - ... bson_type = Decimal128 # the BSON type acted upon by this type codec + ... python_type = Decimal # the Python type acted upon by this type codec + ... bson_type = Decimal128 # the BSON type acted upon by this type codec ... def transform_python(self, value): ... """Function that transforms a custom type value into a type ... that BSON can encode.""" @@ -88,6 +88,7 @@ interested in both encoding and decoding our custom type, we use the ... """Function that transforms a vanilla BSON type value into our ... custom type.""" ... return value.to_decimal() + ... >>> decimal_codec = DecimalCodec() @@ -125,7 +126,7 @@ with our ``type_registry`` and use it to get a >>> from bson.codec_options import CodecOptions >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) Now, we can seamlessly encode and decode instances of @@ -133,12 +134,12 @@ Now, we can seamlessly encode and decode instances of .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) - + >>> collection.insert_one({"num": Decimal("45.321")}) + InsertOneResult(ObjectId('...'), acknowledged=True) >>> mydoc = collection.find_one() >>> import pprint >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal('45.321')} + {'_id': ObjectId('...'), 'num': Decimal('45.321')} We can see what's actually being saved to the database by creating a fresh @@ -147,9 +148,9 @@ MongoDB: .. 
doctest:: - >>> vanilla_collection = db.get_collection('test') + >>> vanilla_collection = db.get_collection("test") >>> pprint.pprint(vanilla_collection.find_one()) - {u'_id': ObjectId('...'), u'num': Decimal128('45.321')} + {'_id': ObjectId('...'), 'num': Decimal128('45.321')} Encoding Subtypes @@ -170,13 +171,14 @@ an integer: ... def my_method(self): ... """Method implementing some custom logic.""" ... return int(self) + ... If we try to save an instance of this type without first registering a type codec for it, we get an error: .. doctest:: - >>> collection.insert_one({'num': DecimalInt("45.321")}) + >>> collection.insert_one({"num": DecimalInt("45.321")}) Traceback (most recent call last): ... bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -192,6 +194,7 @@ This is trivial to do since the same transformation as the one used for ... def python_type(self): ... """The Python type acted upon by this type codec.""" ... return DecimalInt + ... >>> decimalint_codec = DecimalIntCodec() @@ -211,13 +214,13 @@ object, we can seamlessly encode instances of ``DecimalInt``: >>> type_registry = TypeRegistry([decimal_codec, decimalint_codec]) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() - >>> collection.insert_one({'num': DecimalInt("45.321")}) - + >>> collection.insert_one({"num": DecimalInt("45.321")}) + InsertOneResult(ObjectId('...'), acknowledged=True) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal('45.321')} + {'_id': ObjectId('...'), 'num': Decimal('45.321')} Note that the ``transform_bson`` method of the base codec class results in these values being decoded as ``Decimal`` (and not ``DecimalInt``). @@ -236,26 +239,26 @@ writing a ``TypeDecoder`` that modifies how this datatype is decoded. On Python 3.x, :class:`~bson.binary.Binary` data (``subtype = 0``) is decoded as a ``bytes`` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 3.x. >>> from bson.binary import Binary - >>> newcoll = db.get_collection('new') - >>> newcoll.insert_one({'_id': 1, 'data': Binary(b"123", subtype=0)}) + >>> newcoll = db.get_collection("new") + >>> newcoll.insert_one({"_id": 1, "data": Binary(b"123", subtype=0)}) >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bytes On Python 2.7.x, the same data is decoded as a :class:`~bson.binary.Binary` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 2.7.x - >>> newcoll = db.get_collection('new') + >>> newcoll = db.get_collection("new") >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bson.binary.Binary @@ -291,6 +294,7 @@ BSON-encodable value. The following fallback encoder encodes python's ... if isinstance(value, Decimal): ... return Decimal128(value) ... return value + ... 
After declaring the callback, we must create a type registry and codec options with this fallback encoder before it can be used for initializing a collection: @@ -299,18 +303,18 @@ with this fallback encoder before it can be used for initializing a collection: >>> type_registry = TypeRegistry(fallback_encoder=fallback_encoder) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) - + >>> collection.insert_one({"num": Decimal("45.321")}) + InsertOneResult(ObjectId('...'), acknowledged=True) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal128('45.321')} + {'_id': ObjectId('...'), 'num': Decimal128('45.321')} .. note:: @@ -343,12 +347,15 @@ We start by defining some arbitrary custom types: class MyStringType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyStringType('%s')" % (self.__value,) + class MyNumberType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyNumberType(%s)" % (self.__value,) @@ -362,11 +369,15 @@ back into Python objects: import pickle from bson.binary import Binary, USER_DEFINED_SUBTYPE + + def fallback_pickle_encoder(value): return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE) + class PickledBinaryDecoder(TypeDecoder): bson_type = Binary + def transform_bson(self, value): if value.subtype == USER_DEFINED_SUBTYPE: return pickle.loads(value) @@ -384,19 +395,23 @@ Finally, we create a ``CodecOptions`` instance: .. code-block:: python - codec_options = CodecOptions(type_registry=TypeRegistry( - [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder)) + codec_options = CodecOptions( + type_registry=TypeRegistry( + [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder + ) + ) We can now round trip our custom objects to MongoDB: .. code-block:: python - collection = db.get_collection('test_fe', codec_options=codec_options) - collection.insert_one({'_id': 1, 'str': MyStringType("hello world"), - 'num': MyNumberType(2)}) + collection = db.get_collection("test_fe", codec_options=codec_options) + collection.insert_one( + {"_id": 1, "str": MyStringType("hello world"), "num": MyNumberType(2)} + ) mydoc = collection.find_one() - assert isinstance(mydoc['str'], MyStringType) - assert isinstance(mydoc['num'], MyNumberType) + assert isinstance(mydoc["str"], MyStringType) + assert isinstance(mydoc["num"], MyNumberType) Limitations diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index d712ce6138..5571880e94 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -6,8 +6,9 @@ Datetimes and Timezones import datetime from pymongo import MongoClient from bson.codec_options import CodecOptions + client = MongoClient() - client.drop_database('dt_example') + client.drop_database("dt_example") db = client.dt_example These examples show how to handle Python :class:`datetime.datetime` objects @@ -25,39 +26,38 @@ time into MongoDB: .. doctest:: >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.utcnow()}) + ... {"last_modified": datetime.datetime.now(tz=datetime.timezone.utc)} + ... 
) -Always use :meth:`datetime.datetime.utcnow`, which returns the current time in -UTC, instead of :meth:`datetime.datetime.now`, which returns the current local +Always use ``datetime.datetime.now(tz=datetime.timezone.utc)``, which explicitly returns the current time in +UTC, instead of calling :meth:`datetime.datetime.now` with no arguments, which returns the current local time. Avoid doing this: .. doctest:: - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now()}) + >>> result = db.objects.insert_one({"last_modified": datetime.datetime.now()}) -The value for `last_modified` is very different between these two examples, even +The value for ``last_modified`` is very different between these two examples, even though both documents were stored at around the same local time. This will be confusing to the application that reads them: .. doctest:: - >>> [doc['last_modified'] for doc in db.objects.find()] # doctest: +SKIP + >>> [doc["last_modified"] for doc in db.objects.find()] # doctest: +SKIP [datetime.datetime(2015, 7, 8, 18, 17, 28, 324000), datetime.datetime(2015, 7, 8, 11, 17, 42, 911000)] -:class:`bson.codec_options.CodecOptions` has a `tz_aware` option that enables +:class:`bson.codec_options.CodecOptions` has a ``tz_aware`` option that enables "aware" :class:`datetime.datetime` objects, i.e., datetimes that know what timezone they're in. By default, PyMongo retrieves naive datetimes: .. doctest:: - >>> result = db.tzdemo.insert_one( - ... {'date': datetime.datetime(2002, 10, 27, 6, 0, 0)}) - >>> db.tzdemo.find_one()['date'] + >>> result = db.tzdemo.insert_one({"date": datetime.datetime(2002, 10, 27, 6, 0, 0)}) + >>> db.tzdemo.find_one()["date"] datetime.datetime(2002, 10, 27, 6, 0) >>> options = CodecOptions(tz_aware=True) - >>> db.get_collection('tzdemo', codec_options=options).find_one()['date'] # doctest: +SKIP + >>> db.get_collection("tzdemo", codec_options=options).find_one()["date"] # doctest: +SKIP datetime.datetime(2002, 10, 27, 6, 0, tzinfo=) @@ -65,17 +65,16 @@ Saving Datetimes with Timezones ------------------------------- When storing :class:`datetime.datetime` objects that specify a timezone -(i.e. they have a `tzinfo` property that isn't ``None``), PyMongo will convert +(i.e. they have a ``tzinfo`` property that isn't ``None``), PyMongo will convert those datetimes to UTC automatically: .. doctest:: >>> import pytz - >>> pacific = pytz.timezone('US/Pacific') - >>> aware_datetime = pacific.localize( - ... datetime.datetime(2002, 10, 27, 6, 0, 0)) + >>> pacific = pytz.timezone("US/Pacific") + >>> aware_datetime = pacific.localize(datetime.datetime(2002, 10, 27, 6, 0, 0)) >>> result = db.times.insert_one({"date": aware_datetime}) - >>> db.times.find_one()['date'] + >>> db.times.find_one()["date"] datetime.datetime(2002, 10, 27, 14, 0) Reading Time @@ -83,12 +82,12 @@ As previously mentioned, by default all :class:`datetime.datetime` objects returned by PyMongo will be naive but reflect UTC (i.e. the time as stored in -MongoDB). By setting the `tz_aware` option on +MongoDB). By setting the ``tz_aware`` option on :class:`~bson.codec_options.CodecOptions`, :class:`datetime.datetime` objects -will be timezone-aware and have a `tzinfo` property that reflects the UTC +will be timezone-aware and have a ``tzinfo`` property that reflects the UTC timezone.
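+For instance, a minimal sketch (reusing the ``times`` collection from the
+example above; the exact ``tzinfo`` repr depends on your platform) of reading
+back an aware datetime and converting it with just the standard library::
+
+    >>> from bson.codec_options import CodecOptions
+    >>> aware_coll = db.get_collection("times", codec_options=CodecOptions(tz_aware=True))
+    >>> doc = aware_coll.find_one()
+    >>> doc["date"].tzinfo is not None
+    True
+    >>> doc["date"].astimezone()  # doctest: +SKIP
+    datetime.datetime(2002, 10, 27, ...)  # rendered in the system's local timezone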
-PyMongo 3.1 introduced a `tzinfo` property that can be set on +PyMongo 3.1 introduced a ``tzinfo`` property that can be set on :class:`~bson.codec_options.CodecOptions` to convert :class:`datetime.datetime` objects to local time automatically. For example, if we wanted to read all times out of MongoDB in US/Pacific time: @@ -102,3 +101,77 @@ out of MongoDB in US/Pacific time: >>> result = aware_times.find_one() datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE tzinfo=) + +.. _handling-out-of-range-datetimes: + +Handling out-of-range datetimes +------------------------------- + +Python's :class:`~datetime.datetime` can only represent datetimes within the +range allowed by +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`, whereas +a BSON datetime can represent any 64-bit number +of milliseconds from the Unix epoch. To deal with this, we can use the +:class:`bson.datetime_ms.DatetimeMS` object, which is a wrapper for the +:class:`int` built-in. + +To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, +:class:`~bson.codec_options.CodecOptions` should have its +``datetime_conversion`` parameter set to one of the options available in +:class:`bson.datetime_ms.DatetimeConversion`. These include +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO`, and +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP`. +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME` is the default +option and raises an :exc:`OverflowError` when +attempting to decode an out-of-range date. +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS` will only return +:class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the +represented datetime is in- or out-of-range: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson import encode, decode + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import CodecOptions, DatetimeConversion + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) + >>> decode(x, codec_options=codec_ms) + {'x': DatetimeMS(0)} + +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO` will return +:class:`~datetime.datetime` if the underlying UTC datetime is within range, +or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime +cannot be represented using the builtin Python :class:`~datetime.datetime`: + +.. doctest:: + + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) + >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) + >>> decode(x, codec_options=codec_auto) + {'x': datetime.datetime(1970, 1, 1, 0, 0)} + >>> decode(y, codec_options=codec_auto) + {'x': DatetimeMS(-4611686018427387904)} + +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP` will clamp +resulting :class:`~datetime.datetime` objects to be within +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` +(trimmed to ``999000`` microseconds): + +..
doctest:: + + >>> x = encode({"x": DatetimeMS(2**62)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) + >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) + >>> decode(x, codec_options=codec_clamp) + {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} + >>> decode(y, codec_options=codec_clamp) + {'x': datetime.datetime(1, 1, 1, 0, 0)} + +:class:`~bson.datetime_ms.DatetimeMS` objects have support for rich comparison +methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. +They can also be converted to :class:`~datetime.datetime` objects with +:meth:`~bson.datetime_ms.DatetimeMS.to_datetime()`. diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 2f8e2c7a93..fb61189499 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -1,5 +1,12 @@ +.. _In-Use Encryption: + +In-Use Encryption +================= + +.. _Client-Side Field Level Encryption: + Client-Side Field Level Encryption -================================== +---------------------------------- New in MongoDB 4.2, client-side field level encryption allows an application to encrypt specific data fields in addition to pre-existing MongoDB @@ -14,16 +21,17 @@ level encryption supports workloads where applications must guarantee that unauthorized parties, including server administrators, cannot read the encrypted data. -.. mongodoc:: client-side-field-level-encryption +.. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. Dependencies ------------- +~~~~~~~~~~~~ To get started using client-side field level encryption in your project, you will need to install the -`pymongocrypt `_ library +`pymongocrypt `_ and +`pymongo-auth-aws `_ libraries as well as the driver itself. Install both the driver and a compatible -version of pymongocrypt like this:: +version of the dependencies like this:: $ python -m pip install 'pymongo[encryption]' @@ -32,8 +40,30 @@ support. For more information about installing pymongocrypt see `the installation instructions on the project's PyPI page `_. +Additionally, either `crypt_shared`_ or `mongocryptd`_ is required in order +to use *automatic* client-side encryption. + +crypt_shared +```````````` + +The Automatic Encryption Shared Library (crypt_shared) provides the same +functionality as `mongocryptd`_, but does not require you to spawn another +process to perform automatic encryption. + +By default, pymongo attempts to load crypt_shared from the system and, if +found, uses it automatically. To load crypt_shared from another location, +use the ``crypt_shared_lib_path`` argument to +:class:`~pymongo.encryption_options.AutoEncryptionOpts`. +If pymongo cannot load crypt_shared, it will fall back to using +`mongocryptd`_ by default. Set ``crypt_shared_lib_required=True`` to make +the app always use crypt_shared and fail if it cannot be loaded. + +For detailed installation instructions, see +`the MongoDB documentation on Automatic Encryption Shared Library +`_. + mongocryptd ------------ +``````````` The ``mongocryptd`` binary is required for automatic client-side encryption and is included as a component in the `MongoDB Enterprise Server package @@ -116,21 +146,19 @@ the client into sending unencrypted data that should be encrypted. JSON Schemas supplied in the ``schema_map`` only apply to configuring automatic client-side field level encryption.
Other validation rules in the JSON schema will not be enforced by the driver and -will result in an error.:: +will result in an error. - import os +.. code-block:: python + import os from bson.codec_options import CodecOptions from bson import json_util - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts - def create_json_schema_file(kms_providers, key_vault_namespace, - key_vault_client): + def create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client): client_encryption = ClientEncryption( kms_providers, key_vault_namespace, @@ -140,31 +168,33 @@ will result in an error.:: # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_1']) + "local", key_alt_names=["pymongo_encryption_example_1"] + ) schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be # able to parse the MongoDB extended JSON file. json_schema_string = json_util.dumps( - schema, json_options=json_util.CANONICAL_JSON_OPTIONS) + schema, json_options=json_util.CANONICAL_JSON_OPTIONS + ) - with open('jsonSchema.json', 'w') as file: + with open("jsonSchema.json", "w") as file: file.write(json_schema_string) @@ -191,19 +221,20 @@ will result in an error.:: key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) - create_json_schema_file( - kms_providers, key_vault_namespace, key_vault_client) + create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client) # Load the JSON Schema and construct the local schema_map option. 
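+        # The keys of schema_map are full "database.collection" namespaces;
+        # each value is the JSON Schema used to automatically encrypt that
+        # collection.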
- with open('jsonSchema.json', 'r') as file: + with open("jsonSchema.json", "r") as file: json_schema_string = file.read() json_schema = json_util.loads(json_schema_string) schema_map = {encrypted_namespace: json_schema} auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, schema_map=schema_map) + kms_providers, key_vault_namespace, schema_map=schema_map + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) @@ -212,14 +243,15 @@ will result in an error.:: coll.drop() coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) if __name__ == "__main__": main() + Server-Side Field Level Encryption Enforcement `````````````````````````````````````````````` @@ -233,7 +265,9 @@ encryption using :class:`~pymongo.encryption.ClientEncryption` to create a new encryption data key and create a collection with the `Automatic Encryption JSON Schema Syntax -`_:: +`_: + +.. code-block:: python import os @@ -241,8 +275,7 @@ data key and create a collection with the from bson.binary import STANDARD from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern @@ -271,7 +304,8 @@ data key and create a collection with the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -282,27 +316,27 @@ data key and create a collection with the # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_2']) + "local", key_alt_names=["pymongo_encryption_example_2"] + ) json_schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace) + auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) db = client[db_name] @@ -318,22 +352,24 @@ data key and create a collection with the # JSON Schema. 
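+            # The $jsonSchema validator below makes the server itself reject
+            # unencrypted writes to encryptedField, as the OperationFailure
+            # at the end of this example demonstrates.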
codec_options=CodecOptions(uuid_representation=STANDARD), write_concern=WriteConcern(w="majority"), - validator={"$jsonSchema": json_schema}) + validator={"$jsonSchema": json_schema}, + ) coll = client[db_name][coll_name] coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) try: unencrypted_coll.insert_one({"encryptedField": "123456789"}) except OperationFailure as exc: - print('Unencrypted insert failed: %s' % (exc.details,)) + print("Unencrypted insert failed: %s" % (exc.details,)) if __name__ == "__main__": main() + .. _explicit-client-side-encryption: Explicit Encryption @@ -341,13 +377,14 @@ Explicit Encryption Explicit encryption is a MongoDB community feature and does not use the ``mongocryptd`` process. Explicit encryption is provided by the -:class:`~pymongo.encryption.ClientEncryption` class, for example:: +:class:`~pymongo.encryption.ClientEncryption` class, for example: + +.. code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption def main(): @@ -374,7 +411,8 @@ Explicit encryption is a MongoDB community feature and does not use the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -385,24 +423,27 @@ Explicit encryption is a MongoDB community feature and does not use the # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_3']) + "local", key_alt_names=["pymongo_encryption_example_3"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) + key_id=data_key_id, + ) coll.insert_one({"encryptedField": encrypted_field}) doc = coll.find_one() - print('Encrypted document: %s' % (doc,)) + print("Encrypted document: %s" % (doc,)) # Explicitly decrypt the field: doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) # Cleanup resources. client_encryption.close() @@ -420,13 +461,14 @@ Although automatic encryption requires MongoDB 4.2 enterprise or a MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True`` in -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. 
code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts @@ -445,7 +487,8 @@ To configure automatic *decryption* without automatic *encryption* set # the automatic _decryption_ behavior. bypass_auto_encryption will # also disable spawning mongocryptd. auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, bypass_auto_encryption=True) + kms_providers, key_vault_namespace, bypass_auto_encryption=True + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) coll = client.test.coll @@ -459,7 +502,8 @@ To configure automatic *decryption* without automatic *encryption* set key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -470,23 +514,26 @@ To configure automatic *decryption* without automatic *encryption* set # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_4']) + "local", key_alt_names=["pymongo_encryption_example_4"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='pymongo_encryption_example_4') + key_alt_name="pymongo_encryption_example_4", + ) coll.insert_one({"encryptedField": encrypted_field}) # Automatically decrypts any encrypted fields. doc = coll.find_one() - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) unencrypted_coll = MongoClient().test.coll - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) # Cleanup resources. client_encryption.close() @@ -495,3 +542,303 @@ To configure automatic *decryption* without automatic *encryption* set if __name__ == "__main__": main() + + +.. _CSFLE on-demand credentials: + + +CSFLE on-demand credentials +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``pymongocrypt`` 1.4 adds support for fetching on-demand KMS credentials for +AWS, GCP, and Azure cloud environments. + +To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to +"kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. + +An application using AWS credentials would look like: + +.. code-block:: python + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. 
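+        # Credentials are then fetched from the environment at the time of
+        # the KMS request, using the same lookup as MONGODB-AWS authentication.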
+ kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + client_encryption.create_data_key("aws", master_key) + +The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the +caching to avoid rate limiting. + +An application using GCP credentials would look like: + +.. code-block:: python + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "projectId": "my-project", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + client_encryption.create_data_key("gcp", master_key) + +The driver will query the `VM instance metadata `_ to obtain credentials. + +An application using Azure credentials would look like, this time using +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. code-block:: python + + from pymongo import MongoClient + from pymongo.encryption_options import AutoEncryptionOpts + + # The empty dictionary enables on-demand credentials. + kms_providers = ({"azure": {}},) + key_vault_namespace = "keyvault.datakeys" + auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + coll = client.test.coll + coll.insert_one({"encryptedField": "123456789"}) + +The driver will `acquire an access token `_ from the Azure VM. + +.. _Queryable Encryption: + +Queryable Encryption +-------------------- + +.. _automatic-queryable-client-side-encryption: + +Automatic Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Automatic Queryable Encryption requires MongoDB 7.0+ Enterprise or a MongoDB 7.0+ Atlas cluster. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, +as demonstrated by the following example: + +.. code-block:: python + + import os + from bson.codec_options import CodecOptions + from pymongo import MongoClient + from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption_options import AutoEncryptionOpts + + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + key_vault_namespace = "keyvault.datakeys" + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, CodecOptions() + ) + key_vault = key_vault_client["keyvault"]["datakeys"] + key_vault.drop() + # Ensure that two data keys cannot share the same keyAltName. 
+ key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) + key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) + + encrypted_fields_map = { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + }, + ], + } + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, + key_vault_namespace, + encrypted_fields_map=encrypted_fields_map, + ) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + client.default.drop_collection("encryptedCollection") + coll = client.default.create_collection("encryptedCollection") + coll.insert_one({"_id": 1, "firstName": "Jane", "lastName": "Doe"}) + docs = list(coll.find({"firstName": "Jane"})) + print(docs) + +In the above example, the ``firstName`` and ``lastName`` fields are +automatically encrypted and decrypted. + +Explicit Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Explicit Queryable Encryption requires MongoDB 7.0+. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` +methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured +using an ``encrypted_fields`` mapping, as demonstrated by the following example: + +.. code-block:: python + + import os + from pymongo import MongoClient + from pymongo.encryption import ( + Algorithm, + AutoEncryptionOpts, + ClientEncryption, + QueryType, + ) + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # Set up the key vault (key_vault_namespace) for this example. + client = MongoClient() + key_vault = client[key_vault_db_name][key_vault_coll_name] + + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + client.codec_options, + ) + + # Create a new data key for the encryptedField. 
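+        # Two data keys: one for the indexed field, one for the unindexed field.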
+ indexed_key_id = client_encryption.create_data_key("local") + unindexed_key_id = client_encryption.create_data_key("local") + + encrypted_fields = { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": {"queryType": "equality"}, + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + }, + ], + } + + opts = AutoEncryptionOpts( + {"local": {"key": local_master_key}}, + key_vault.full_name, + bypass_query_analysis=True, + key_vault_client=client, + ) + + # The MongoClient used to read/write application data. + encrypted_client = MongoClient(auto_encryption_opts=opts) + encrypted_client.drop_database("test") + db = encrypted_client.test + + # Create the collection with encrypted fields. + coll = db.create_collection("coll", encryptedFields=encrypted_fields) + + # Create and encrypt an indexed and unindexed value. + val = "encrypted indexed value" + unindexed_val = "encrypted unindexed value" + insert_payload_indexed = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, contention_factor=1 + ) + insert_payload_unindexed = client_encryption.encrypt( + unindexed_val, Algorithm.UNINDEXED, unindexed_key_id + ) + + # Insert the payloads. + coll.insert_one( + { + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed, + } + ) + + # Encrypt our find payload using QueryType.EQUALITY. + # The value of "indexed_key_id" must be the same as used to encrypt + # the values above. + find_payload = client_encryption.encrypt( + val, + Algorithm.INDEXED, + indexed_key_id, + query_type=QueryType.EQUALITY, + contention_factor=1, + ) + + # Find the document we inserted using the encrypted payload. + # The returned document is automatically decrypted. + doc = coll.find_one({"encryptedIndexed": find_payload}) + print("Returned document: %s" % (doc,)) + + # Cleanup resources. + client_encryption.close() + encrypted_client.close() + client.close() + + + if __name__ == "__main__": + main() diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 26de95cb6a..e7da156720 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -4,13 +4,14 @@ Geospatial Indexing Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('geo_example') + client.drop_database("geo_example") This example shows how to create and use a :data:`~pymongo.GEO2D` index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. -.. mongodoc:: geo +.. seealso:: The MongoDB documentation on `Geospatial Indexes `_. Creating a Geospatial Index --------------------------- @@ -22,7 +23,7 @@ Creating a geospatial index in pymongo is easy: >>> from pymongo import MongoClient, GEO2D >>> db = MongoClient().geo_example >>> db.places.create_index([("loc", GEO2D)]) - u'loc_2d' + 'loc_2d' Inserting Places ---------------- @@ -33,10 +34,9 @@ insert a couple of example locations: .. doctest:: - >>> result = db.places.insert_many([{"loc": [2, 5]}, - ... {"loc": [30, 5]}, - ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) # doctest: +ELLIPSIS + >>> result = db.places.insert_many( + ... [{"loc": [2, 5]}, {"loc": [30, 5]}, {"loc": [1, 2]}, {"loc": [4, 4]}] + ... 
) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -51,11 +51,11 @@ Using the geospatial index we can find documents near another point: >>> import pprint >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [1, 2]} .. note:: If using :data:`pymongo.GEOSPHERE`, using $nearSphere is recommended. @@ -66,11 +66,11 @@ The $maxDistance operator requires the use of :class:`~bson.son.SON`: >>> from bson.son import SON >>> query = {"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])} >>> for doc in db.places.find(query).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [1, 2]} It's also possible to query for all items within a given rectangle (specified by lower-left and upper-right coordinates): @@ -78,28 +78,29 @@ It's also possible to query for all items within a given rectangle .. doctest:: >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} - >>> for doc in db.places.find(query).sort('_id'): + >>> for doc in db.places.find(query).sort("_id"): ... pprint.pprint(doc) - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} + ... + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} Or circle (specified by center point and radius): .. doctest:: >>> query = {"loc": {"$within": {"$center": [[0, 0], 6]}}} - >>> for doc in db.places.find(query).sort('_id'): - ... pprint.pprint(doc) + >>> for doc in db.places.find(query).sort("_id"): + ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [4, 4]} geoNear queries are also supported using :class:`~bson.son.SON`:: >>> from bson.son import SON >>> db.command(SON([('geoNear', 'places'), ('near', [1, 2])])) - {u'ok': 1.0, u'stats': ...} + {'ok': 1.0, 'stats': ...} .. warning:: Starting in MongoDB version 4.0, MongoDB deprecates the **geoNear** command. Use one of the following operations instead. diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst index 6eb283dca9..0ab41c1ec6 100644 --- a/doc/examples/gevent.rst +++ b/doc/examples/gevent.rst @@ -4,7 +4,7 @@ Gevent PyMongo supports `Gevent `_. Simply call Gevent's ``monkey.patch_all()`` before loading any other modules: -.. doctest:: +.. 
code-block:: pycon >>> # You must call patch_all() *before* importing any other modules >>> from gevent import monkey @@ -38,10 +38,12 @@ handler to end background greenlets when your application receives SIGHUP: import signal + def graceful_reload(signum, traceback): """Explicitly close some global MongoClient object.""" client.close() + signal.signal(signal.SIGHUP, graceful_reload) Applications using uWSGI prior to 1.9.16 are affected by this issue, diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst index db55bd2b59..5f40805d79 100644 --- a/doc/examples/gridfs.rst +++ b/doc/examples/gridfs.rst @@ -4,8 +4,9 @@ GridFS Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('gridfs_example') + client.drop_database("gridfs_example") This example shows how to use :mod:`gridfs` to store large binary objects (e.g. files) in MongoDB. @@ -52,7 +53,7 @@ file: .. doctest:: >>> fs.get(a).read() - 'hello world' + b'hello world' :meth:`~gridfs.GridFS.get` returns a file-like object, so we get the file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. @@ -68,11 +69,11 @@ keyword arguments: >>> b = fs.put(fs.get(a), filename="foo", bar="baz") >>> out = fs.get(b) >>> out.read() - 'hello world' + b'hello world' >>> out.filename - u'foo' + 'foo' >>> out.bar - u'baz' + 'baz' >>> out.upload_date datetime.datetime(...) diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index b52b22a106..8f94aba074 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -4,7 +4,7 @@ High Availability and PyMongo PyMongo makes it easy to write highly available applications whether you use a `single replica set `_ or a `large sharded cluster -`_. +`_. Connecting to a Replica Set --------------------------- @@ -14,7 +14,7 @@ PyMongo makes working with `replica sets replica set and show how to handle both initialization and normal connections with PyMongo. -.. mongodoc:: rs +.. seealso:: The MongoDB documentation on `replication `_. Starting a Replica Set ~~~~~~~~~~~~~~~~~~~~~~ @@ -52,11 +52,11 @@ At this point all of our nodes are up and running, but the set has yet to be initialized. Until the set is initialized no node will become the primary, and things are essentially "offline". -To initialize the set we need to connect to a single node and run the -initiate command:: +To initialize the set we need to connect directly to a single node and run the +initiate command using the ``directConnection`` option:: >>> from pymongo import MongoClient - >>> c = MongoClient('localhost', 27017) + >>> c = MongoClient('localhost', 27017, directConnection=True) .. note:: We could have connected to any of the other nodes instead, but only the node we initiate from is allowed to contain any @@ -81,15 +81,19 @@ The initial connection as made above is a special case for an uninitialized replica set. Normally we'll want to connect differently. A connection to a replica set can be made using the :meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set, along with the replica set name. Any of -the following connects to the replica set we just created:: +one or more members of the set and optionally the replica set name. +Any of the following connects to the replica set we just created:: + >>> MongoClient('localhost') + MongoClient(host=['localhost:27017'], ...) >>> MongoClient('localhost', replicaset='foo') MongoClient(host=['localhost:27017'], replicaset='foo', ...) 
>>> MongoClient('localhost:27018', replicaset='foo') MongoClient(['localhost:27018'], replicaset='foo', ...) >>> MongoClient('localhost', 27019, replicaset='foo') MongoClient(['localhost:27019'], replicaset='foo', ...) + >>> MongoClient('mongodb://localhost:27017,localhost:27018/') + MongoClient(['localhost:27017', 'localhost:27018'], ...) >>> MongoClient('mongodb://localhost:27017,localhost:27018/?replicaSet=foo') MongoClient(['localhost:27017', 'localhost:27018'], replicaset='foo', ...) @@ -111,7 +115,7 @@ set:: >>> from time import sleep >>> c = MongoClient(replicaset='foo'); print(c.nodes); sleep(0.1); print(c.nodes) frozenset([]) - frozenset([(u'localhost', 27019), (u'localhost', 27017), (u'localhost', 27018)]) + frozenset([('localhost', 27019), ('localhost', 27017), ('localhost', 27018)]) You need not wait for replica set discovery in your application, however. If you need to do any operation with a MongoClient, such as a @@ -132,7 +136,7 @@ connect to the replica set and perform a couple of basic operations:: >>> db.test.insert_one({"x": 1}).inserted_id ObjectId('...') >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} + {'x': 1, '_id': ObjectId('...')} By checking the host and port, we can see that we're connected to *localhost:27017*, which is the current primary:: @@ -162,7 +166,7 @@ general). At that point the driver will connect to the new primary and the operation will succeed:: >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} + {'x': 1, '_id': ObjectId('...')} >>> db.client.address ('localhost', 27018) @@ -257,7 +261,7 @@ attributes: **Tag sets**: Replica-set members can be `tagged -`_ according to any +`_ according to any criteria you choose. By default, PyMongo ignores tags when choosing a member to read from, but your read preference can be configured with a ``tag_sets`` parameter. ``tag_sets`` must be a list of dictionaries, each @@ -304,7 +308,7 @@ milliseconds of the closest member's ping time. replica set *through* a mongos. The equivalent is the localThreshold_ command line option. -.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption--localThreshold +.. _localThreshold: https://mongodb.com/docs/manual/reference/program/mongos/#std-option-mongos.--localThreshold .. _health-monitoring: diff --git a/doc/examples/index.rst b/doc/examples/index.rst index baadd74464..23f7a6f181 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -28,7 +28,11 @@ MongoDB, you can start it like so: gridfs high_availability mod_wsgi + network_compression server_selection tailable + timeouts tls + type_hints encryption + uuid diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst index 832d779fd8..96d6ce892f 100644 --- a/doc/examples/mod_wsgi.rst +++ b/doc/examples/mod_wsgi.rst @@ -3,7 +3,7 @@ PyMongo and mod_wsgi ==================== -To run your application under `mod_wsgi `_, +To run your application under `mod_wsgi `_, follow these guidelines: * Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive. @@ -48,9 +48,9 @@ interpreter. Python C extensions in general have issues running in multiple Python sub interpreters. These difficulties are explained in the documentation for -`Py_NewInterpreter `_ +`Py_NewInterpreter `_ and in the `Multiple Python Sub Interpreters -`_ +`_ section of the ``mod_wsgi`` documentation. 
Beginning with PyMongo 2.7, the C extension for BSON detects when it is running diff --git a/doc/examples/network_compression.rst b/doc/examples/network_compression.rst new file mode 100644 index 0000000000..c270dff4b3 --- /dev/null +++ b/doc/examples/network_compression.rst @@ -0,0 +1,39 @@ + +.. _network-compression-example: + +Network Compression +=================== + +PyMongo supports network compression: network traffic between the client +and MongoDB server is compressed, which reduces the amount of data passed +over the network. By default no compression is used. + +The driver supports the following algorithms: + +- `snappy `_ available in MongoDB 3.4 and later. +- :mod:`zlib` available in MongoDB 3.6 and later. +- `zstandard `_ available in MongoDB 4.2 and later. + +.. note:: snappy and zstandard compression require additional dependencies. See :ref:`optional-deps`. + +Applications can enable wire protocol compression via the ``compressors`` URI option and +keyword argument to :class:`~pymongo.mongo_client.MongoClient`. For example:: + + >>> client = MongoClient(compressors='zlib') + +When multiple compression algorithms are given, the driver selects the first one in the +list supported by the MongoDB instance to which it is connected. For example:: + + >>> client = MongoClient(compressors='snappy,zstandard,zlib') + +The ``compressors`` option can also be set via the URI:: + + >>> client = MongoClient('mongodb://example.com/?compressors=snappy,zstandard,zlib') + +Additionally, zlib compression allows specifying a compression level with supported values from -1 to 9:: + + >>> client = MongoClient(compressors='zlib', zlibCompressionLevel=-1) + +The ``zlibCompressionLevel`` is passed as the ``level`` argument to :func:`zlib.compress`. + +.. seealso:: The MongoDB documentation on `network compression URI options `_. diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index 28659c133e..227e849df3 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -2,7 +2,7 @@ Server Selector Example ======================= Users can exert fine-grained control over the `server selection algorithm`_ -by setting the `server_selector` option on the :class:`~pymongo.MongoClient` +by setting the ``server_selector`` option on the :class:`~pymongo.MongoClient` to an appropriate callable. This example shows how to use this functionality to prefer servers running on ``localhost``. @@ -19,7 +19,7 @@ to prefer servers running on ``localhost``. from pymongo import MongoClient -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ +.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ Example: Selecting Servers Running on ``localhost`` @@ -55,12 +55,12 @@ selector function: >>> def server_selector(server_descriptions): ... servers = [ - ... server for server in server_descriptions - ... if server.address[0] == 'localhost' + ... server for server in server_descriptions if server.address[0] == "localhost" ... ] ... if not servers: ... return server_descriptions ... return servers + ... @@ -105,4 +105,4 @@ list of known hosts. As an example, for a 3-member replica set with a all available secondaries. -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ \ No newline at end of file +..
_server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index b9b6dcd74d..79458dc2ff 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -3,9 +3,9 @@ Tailable Cursors By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for `capped collections -`_ you may +`_ you may use a `tailable cursor -`_ +`_ that remains open after the client exhausts the results in the initial cursor. The following is a basic example of using a tailable cursor to tail the oplog @@ -24,9 +24,11 @@ of a replica set member:: while True: # For a regular capped collection CursorType.TAILABLE_AWAIT is the # only option required to create a tailable cursor. When querying the - # oplog the oplog_replay option enables an optimization to quickly + # oplog, the oplog_replay option enables an optimization to quickly # find the 'ts' value we're looking for. The oplog_replay option - # can only be used when querying the oplog. + # can only be used when querying the oplog. Starting in MongoDB 4.4 + # this option is ignored by the server as queries against the oplog + # are optimized automatically by the MongoDB query engine. cursor = oplog.find({'ts': {'$gt': ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True) diff --git a/doc/examples/timeouts.rst b/doc/examples/timeouts.rst new file mode 100644 index 0000000000..5171588962 --- /dev/null +++ b/doc/examples/timeouts.rst @@ -0,0 +1,162 @@ + +.. _timeout-example: + +Client Side Operation Timeout +============================= + +PyMongo 4.2 introduced :meth:`~pymongo.timeout` and the ``timeoutMS`` +URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. +These features allow applications to more easily limit the amount of time that +one or more operations can execute before control is returned to the app. This +timeout applies to all of the work done to execute the operation, including +but not limited to server selection, connection checkout, serialization, and +server-side execution. + +Basic Usage +----------- + +The following example uses :meth:`~pymongo.timeout` to configure a 10-second +timeout for an :meth:`~pymongo.collection.Collection.insert_one` operation:: + + import pymongo + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + +The :meth:`~pymongo.timeout` applies to all pymongo operations within the block. +The following example ensures that both the ``insert`` and the ``find`` complete +within 10 seconds total, or raise a timeout error:: + + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + coll.find_one({"name": "Nunu"}) + +When nesting :func:`~pymongo.timeout`, the nested deadline is capped by the outer +deadline. The deadline can only be shortened, not extended. +When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Still uses the original 5 second deadline. + coll.find_one() # Uses the original 5 second deadline. + +Timeout errors +-------------- + +When the :meth:`~pymongo.timeout` with-statement is entered, a deadline is set +for the entire block. When that deadline is exceeded, any blocking pymongo operation +will raise a timeout exception. 
For example:: + + try: + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + time.sleep(10) + # The deadline has now expired; the next operation will raise + # a timeout exception. + coll.find_one({"name": "Nunu"}) + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") + +The :attr:`pymongo.errors.PyMongoError.timeout` property (added in PyMongo 4.2) +will be ``True`` when the error was caused by a timeout and ``False`` otherwise. + +The timeoutMS URI option +------------------------ + +PyMongo 4.2 also added support for the ``timeoutMS`` URI option and keyword argument to +:class:`~pymongo.mongo_client.MongoClient`. When this option is configured, the +client will automatically apply the timeout to each API call. For example:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"})  # Uses a 10-second timeout. + coll.find_one({"name": "Nunu"})  # Also uses a 10-second timeout. + +The above is roughly equivalent to:: + + client = MongoClient() + coll = client.test.test + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + with pymongo.timeout(10): + coll.find_one({"name": "Nunu"}) + +pymongo.timeout overrides timeoutMS +----------------------------------- + +:meth:`~pymongo.timeout` overrides ``timeoutMS``; within a +:meth:`~pymongo.timeout` block, a client's ``timeoutMS`` option is ignored:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"})  # Uses the client's 10-second timeout. + # pymongo.timeout overrides the client's timeoutMS. + with pymongo.timeout(20): + coll.insert_one({"name": "Nunu"})  # Uses the 20-second timeout. + with pymongo.timeout(5): + coll.find_one({"name": "Nunu"})  # Uses the 5-second timeout. + +pymongo.timeout is thread safe +------------------------------ + +:meth:`~pymongo.timeout` is thread safe; the timeout only applies to the current +thread, and multiple threads can configure different timeouts in parallel. + +pymongo.timeout is asyncio safe +------------------------------- + +:meth:`~pymongo.timeout` is asyncio safe; the timeout only applies to the current +Task, and multiple Tasks can configure different timeouts concurrently. +:meth:`~pymongo.timeout` can be used identically in +`Motor `_, for example:: + + import motor.motor_asyncio + client = motor.motor_asyncio.AsyncIOMotorClient() + coll = client.test.test + with pymongo.timeout(10): + await coll.insert_one({"name": "Nunu"}) + await coll.find_one({"name": "Nunu"}) + +Troubleshooting +--------------- + +There are many timeout errors that can be raised depending on when the timeout +expires. In code, these can be identified with the :attr:`pymongo.errors.PyMongoError.timeout` +property. Some specific timeout error examples are described below. + +When the client was unable to find an available server to run the operation +within the given timeout:: + + pymongo.errors.ServerSelectionTimeoutError: No servers found yet, Timeout: -0.00202266700216569s, Topology Description: <TopologyDescription id: ..., topology_type: ..., servers: [...]> + +When either the client was unable to establish a connection within the given +timeout or the operation was sent but the server was not able to respond in time:: + + pymongo.errors.NetworkTimeout: localhost:27017: timed out + +When the server cancelled the operation because it exceeded the given timeout.
+Note that the operation may have partially completed on the server (depending +on the operation):: + + pymongo.errors.ExecutionTimeout: operation exceeded time limit, full error: {'ok': 0.0, 'errmsg': 'operation exceeded time limit', 'code': 50, 'codeName': 'MaxTimeMSExpired'} + +When the client cancelled the operation because it was not possible to complete +within the given timeout:: + + pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:0.00196 <= network round trip time:0.00427 + +When the client attempted a write operation but the server could not replicate +that write (according to the configured write concern) within the given timeout:: + + pymongo.errors.WTimeoutError: operation exceeded time limit, full error: {'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}} + +The same error as above but for :meth:`~pymongo.collection.Collection.insert_many` +or :meth:`~pymongo.collection.Collection.bulk_write`:: + + pymongo.errors.BulkWriteError: batch op errors occurred, full error: {'writeErrors': [], 'writeConcernErrors': [{'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}}], 'nInserted': 2, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, 'nRemoved': 0, 'upserted': []} diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 4454a1e4b7..9241ac23e7 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -3,35 +3,9 @@ TLS/SSL and PyMongo PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the configuration options supported by PyMongo. See `the server documentation -`_ to configure +`_ to configure MongoDB. -Dependencies -............ - -For connections using TLS/SSL, PyMongo may require third party dependencies as -determined by your version of Python. With PyMongo 3.3+, you can install -PyMongo 3.3+ and any TLS/SSL-related dependencies using the following pip -command:: - - $ python -m pip install pymongo[tls] - -Earlier versions of PyMongo require you to manually install the dependencies -listed below. - -Python 2.x -`````````` -The `ipaddress`_ module is required on all platforms. - -When using CPython < 2.7.9 or PyPy < 2.5.1: - -- On Windows, the `wincertstore`_ module is required. -- On all other platforms, the `certifi`_ module is required. - -.. _ipaddress: https://pypi.python.org/pypi/ipaddress -.. _wincertstore: https://pypi.python.org/pypi/wincertstore -.. _certifi: https://pypi.python.org/pypi/certifi - .. warning:: Industry best practices recommend, and some regulations require, the use of TLS 1.1 or newer. Though no application changes are required for PyMongo to make use of the newest protocols, some operating systems or @@ -58,7 +32,7 @@ When using CPython < 2.7.9 or PyPy < 2.5.1: You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ @@ -69,14 +43,14 @@ Basic configuration ................... 
In many cases connecting to MongoDB over TLS/SSL requires nothing more than -passing ``ssl=True`` as a keyword argument to +passing ``tls=True`` as a keyword argument to :class:`~pymongo.mongo_client.MongoClient`:: - >>> client = pymongo.MongoClient('example.com', ssl=True) + >>> client = pymongo.MongoClient('example.com', tls=True) -Or passing ``ssl=true`` in the URI:: +Or passing ``tls=true`` in the URI:: - >>> client = pymongo.MongoClient('mongodb://example.com/?ssl=true') + >>> client = pymongo.MongoClient('mongodb://example.com/?tls=true') This configures PyMongo to connect to the server using TLS, verify the server's certificate and verify that the host you are attempting to connect to is listed @@ -86,95 +60,129 @@ Certificate verification policy ............................... By default, PyMongo is configured to require a certificate from the server when -TLS is enabled. This is configurable using the `ssl_cert_reqs` option. To -disable this requirement pass ``ssl.CERT_NONE`` as a keyword parameter:: +TLS is enabled. This is configurable using the ``tlsAllowInvalidCertificates`` +option. To disable this requirement pass ``tlsAllowInvalidCertificates=True`` +as a keyword parameter:: - >>> import ssl >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_cert_reqs=ssl.CERT_NONE) + ... tls=True, + ... tlsAllowInvalidCertificates=True) Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_cert_reqs=CERT_NONE' + >>> uri = 'mongodb://example.com/?tls=true&tlsAllowInvalidCertificates=true' >>> client = pymongo.MongoClient(uri) Specifying a CA file .................... In some cases you may want to configure PyMongo to use a specific set of CA -certificates. This is most often the case when using "self-signed" server -certificates. The `ssl_ca_certs` option takes a path to a CA file. It can be +certificates. This is most often the case when you are acting as your own +certificate authority rather than using server certificates signed by a well +known authority. The ``tlsCAFile`` option takes a path to a CA file. It can be passed as a keyword argument:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCAFile='/path/to/ca.pem') Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_ca_certs=/path/to/ca.pem' + >>> uri = 'mongodb://example.com/?tls=true&tlsCAFile=/path/to/ca.pem' >>> client = pymongo.MongoClient(uri) Specifying a certificate revocation list ........................................ -Python 2.7.9+ (pypy 2.5.1+) and 3.4+ provide support for certificate revocation -lists. The `ssl_crlfile` option takes a path to a CRL file. It can be passed as -a keyword argument:: +The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed +as a keyword argument:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_crlfile='/path/to/crl.pem') + ... tls=True, + ... tlsCRLFile='/path/to/crl.pem') Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_crlfile=/path/to/crl.pem' + >>> uri = 'mongodb://example.com/?tls=true&tlsCRLFile=/path/to/crl.pem' >>> client = pymongo.MongoClient(uri) +.. note:: Certificate revocation lists and :ref:`OCSP` cannot be used together. + Client certificates ................... PyMongo can be configured to present a client certificate using the -`ssl_certfile` option:: +``tlsCertificateKeyFile`` option:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... 
ssl_certfile='/path/to/client.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem') -If the private key for the client certificate is stored in a separate file use -the `ssl_keyfile` option:: +If the private key for the client certificate is stored in a separate file, +it should be concatenated with the certificate file. For example, to +concatenate a PEM-formatted certificate file ``cert.pem`` and a PEM-formatted +keyfile ``key.pem`` into a single file ``combined.pem``, on Unix systems, +users can run:: - >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_keyfile='/path/to/key.pem') + $ cat key.pem cert.pem > combined.pem -Python 2.7.9+ (pypy 2.5.1+) and 3.3+ support providing a password or passphrase -to decrypt encrypted private keys. Use the `ssl_pem_passphrase` option:: +PyMongo can be configured with the concatenated certificate keyfile using the +``tlsCertificateKeyFile`` option:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_keyfile='/path/to/key.pem', - ... ssl_pem_passphrase=<passphrase>) + ... tls=True, + ... tlsCertificateKeyFile='/path/to/combined.pem') +If the private key contained in the certificate keyfile is encrypted, users +can provide a password or passphrase to decrypt the encrypted private keys +using the ``tlsCertificateKeyFilePassword`` option:: + + >>> client = pymongo.MongoClient('example.com', + ... tls=True, + ... tlsCertificateKeyFile='/path/to/combined.pem', + ... tlsCertificateKeyFilePassword=<passphrase>) These options can also be passed as part of the MongoDB URI. +.. _OCSP: + +OCSP +.... + +Starting with PyMongo 3.11, if PyMongo was installed with the "ocsp" extra:: + + python -m pip install pymongo[ocsp] + +certificate revocation checking is enabled by way of `OCSP (Online Certificate +Status Protocol) `_. +MongoDB 4.4+ `staples OCSP responses `_ +to the TLS handshake, which PyMongo will verify, failing the TLS handshake if +the stapled OCSP response is invalid or indicates that the peer certificate is +revoked. + +When connecting to a server version older than 4.4, or when a 4.4+ version of +MongoDB does not staple an OCSP response, PyMongo will attempt to connect +directly to an OCSP endpoint if the peer certificate specified one. The TLS +handshake will only fail in this case if the response indicates that the +certificate is revoked. Invalid or malformed responses will be ignored, +favoring availability over maximum security. + +.. _TLSErrors: + Troubleshooting TLS Errors .......................... -TLS errors often fall into two categories, certificate verification failure or -protocol version mismatch. An error message similar to the following means that -OpenSSL was not able to verify the server's certificate:: +TLS errors often fall into three categories: certificate verification failure, +protocol version mismatch, or certificate revocation checking failure. An error +message similar to the following means that OpenSSL was not able to verify the +server's certificate:: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed This often occurs because OpenSSL does not have access to the system's root certificates or the certificates are out of date. Linux users should ensure that they have the latest root certificate updates installed from -their Linux vendor.
macOS users using Python 3.7 or newer downloaded from python.org `may have to run a script included with python `_ to install root certificates:: @@ -200,3 +208,27 @@ TLS protocols be disabled in some MongoDB deployments. Some deployments may disable TLS 1.0, others may disable TLS 1.0 and TLS 1.1. See the warning earlier in this document for troubleshooting steps and solutions. +An error message similar to the following means that certificate +revocation checking failed:: + + [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')] + +See :ref:`OCSP` for more details. + +Python 3.10+ incompatibilities with TLS/SSL on MongoDB <= 4.0 +............................................................. + +Note that `changes made to the ssl module in Python 3.10+ +`_ may cause incompatibilities +with MongoDB <= 4.0. The following are some example errors that may occur with this +combination:: + + SSL handshake failed: localhost:27017: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997) + SSL handshake failed: localhost:27017: EOF occurred in violation of protocol (_ssl.c:997) + +The MongoDB server logs may show the following error:: + + 2021-06-30T21:22:44.917+0100 E NETWORK [conn16] SSL: error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher + +To resolve this issue, use Python <3.10, upgrade to MongoDB 4.2+, or install +pymongo with the :ref:`OCSP` extra, which relies on PyOpenSSL. diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst new file mode 100644 index 0000000000..375ad14330 --- /dev/null +++ b/doc/examples/type_hints.rst @@ -0,0 +1,332 @@ + +.. _type_hints-example: + +Type Hints +========== + +As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python +type checkers can easily find bugs before they reveal themselves in your code. + +If your IDE is configured to use type hints, +it can suggest more appropriate completions and highlight errors in your code. +Some examples include `PyCharm`_, `Sublime Text`_, and `Visual Studio Code`_. + +You can also use the `mypy`_ tool from your command line or in Continuous Integration tests. + +All of the public APIs in PyMongo are fully type hinted, and +several of them support generic parameters for the +type of document object returned when decoding BSON documents. + +Due to `limitations in mypy`_, the default +values for generic document types are not yet provided (they will eventually be ``Dict[str, Any]``). + +For a larger set of examples that use types, see the PyMongo `test_typing module`_. + +If you would like to opt out of using the provided types, add the following to +your `mypy config`_: :: + + [mypy-pymongo] + follow_imports = False + + +Basic Usage +----------- + +Note that a type for :class:`~pymongo.mongo_client.MongoClient` must be specified. Here we use the +default, unspecified document type: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> client: MongoClient = MongoClient() + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> retrieved = collection.find_one({"x": 1}) + >>> assert isinstance(retrieved, dict) + +For more accurate typing of the document type you can use: +..
doctest:: + + >>> from typing import Any, Dict + >>> from pymongo import MongoClient + >>> client: MongoClient[Dict[str, Any]] = MongoClient() + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> retrieved = collection.find_one({"x": 1}) + >>> assert isinstance(retrieved, dict) + +Typed Client +------------ + +:class:`~pymongo.mongo_client.MongoClient` is generic on the document type used to decode BSON documents. + +You can specify a :class:`~bson.raw_bson.RawBSONDocument` document type: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> client = MongoClient(document_class=RawBSONDocument) + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> result = collection.find_one({"x": 1}) + >>> assert isinstance(result, RawBSONDocument) + +Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :class:`~bson.son.SON`: + +.. doctest:: + + >>> from bson import SON + >>> from pymongo import MongoClient + >>> client = MongoClient(document_class=SON[str, int]) + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "y": 2}) + >>> result = collection.find_one({"x": 1}) + >>> assert result is not None + >>> assert result["x"] == 1 + +Note that when using :class:`~bson.son.SON`, the key and value types must be given, e.g. ``SON[str, Any]``. + + +Typed Collection +---------------- + +You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a +:class:`~pymongo.collection.Collection`. Note that all `schema validation`_ for inserts and updates is done on the server. +These methods automatically add an "_id" field. + +.. doctest:: + :pyversion: >= 3.8 + + >>> from typing import TypedDict + >>> from pymongo import MongoClient + >>> from pymongo.collection import Collection + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + +This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, +:meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). +For ``bulk_write`` both :class:`~pymongo.operations.InsertOne` and :class:`~pymongo.operations.ReplaceOne` operators are generic. + +.. doctest:: + :pyversion: >= 3.8 + + >>> from typing import TypedDict + >>> from pymongo import MongoClient + >>> from pymongo.operations import InsertOne + >>> from pymongo.collection import Collection + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.bulk_write([InsertOne(Movie(name="Jurassic Park", year=1993))]) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. 
+ >>> assert result["_id"] # type:ignore[typeddict-item] + +Modeling Document Types with TypedDict +-------------------------------------- + +You can use :py:class:`~typing.TypedDict` (Python 3.8+) to model structured data. +As noted above, PyMongo will automatically add an ``_id`` field if it is not present. This also applies to TypedDict. +There are three approaches to this: + + 1. Do not specify ``_id`` at all. It will be inserted automatically, and can be retrieved at run-time, but will yield a type-checking error unless explicitly ignored. + + 2. Specify ``_id`` explicitly. This will mean that every instance of your custom TypedDict class will have to pass a value for ``_id``. + + 3. Make use of :py:class:`~typing.NotRequired`. This has the flexibility of option 1, but with the ability to access the ``_id`` field without causing a type-checking error. + +Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` in earlier versions of Python (<3.8, <3.11), use the ``typing_extensions`` package. + +.. doctest:: typed-dict-example + :pyversion: >= 3.11 + + >>> from typing import TypedDict, NotRequired + >>> from pymongo import MongoClient + >>> from pymongo.collection import Collection + >>> from bson import ObjectId + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> class ExplicitMovie(TypedDict): + ... _id: ObjectId + ... name: str + ... year: int + ... + >>> class NotRequiredMovie(TypedDict): + ... _id: NotRequired[ObjectId] + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will yield a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + >>> collection: Collection[ExplicitMovie] = client.test.test + >>> # Note that the _id keyword argument must be supplied + >>> inserted = collection.insert_one( + ... ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993) + ... ) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error. + >>> assert result["_id"] + >>> collection: Collection[NotRequiredMovie] = client.test.test + >>> # Note the lack of _id, similar to the first example + >>> inserted = collection.insert_one(NotRequiredMovie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error, despite not being provided explicitly. + >>> assert result["_id"] + + +Typed Database +-------------- + +While less common, you could specify that the documents in an entire database +match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). + + +.. doctest:: + + >>> from typing import TypedDict + >>> from pymongo import MongoClient + >>> from pymongo.database import Database + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... 
+  >>> client: MongoClient = MongoClient()
+  >>> db: Database[Movie] = client.test
+  >>> collection = db.test
+  >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993})
+  >>> result = collection.find_one({"name": "Jurassic Park"})
+  >>> assert result is not None
+  >>> assert result["year"] == 1993
+
+Typed Command
+-------------
+When using the :meth:`~pymongo.database.Database.command` method, you can
+specify the document type by providing a custom
+:class:`~bson.codec_options.CodecOptions`:
+
+.. doctest::
+
+  >>> from pymongo import MongoClient
+  >>> from bson.raw_bson import RawBSONDocument
+  >>> from bson import CodecOptions
+  >>> client: MongoClient = MongoClient()
+  >>> options = CodecOptions(RawBSONDocument)
+  >>> result = client.admin.command("ping", codec_options=options)
+  >>> assert isinstance(result, RawBSONDocument)
+
+Custom :py:class:`collections.abc.Mapping` subclasses and
+:py:class:`~typing.TypedDict` (Python 3.8+) are also supported.
+For :py:class:`~typing.TypedDict`, use the form:
+``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
+
+Typed BSON Decoding
+-------------------
+You can specify the document type returned by :mod:`bson` decoding functions
+by providing :class:`~bson.codec_options.CodecOptions`:
+
+.. doctest::
+
+  >>> from typing import Any, Dict
+  >>> from bson import CodecOptions, encode, decode
+  >>> class MyDict(Dict[str, Any]):
+  ...     def foo(self):
+  ...         return "bar"
+  ...
+  >>> options = CodecOptions(document_class=MyDict)
+  >>> doc = {"x": 1, "y": 2}
+  >>> bsonbytes = encode(doc, codec_options=options)
+  >>> rt_document = decode(bsonbytes, codec_options=options)
+  >>> assert rt_document.foo() == "bar"
+
+:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict`
+(Python 3.8+) are also supported.
+For :py:class:`~typing.TypedDict`, use the form:
+``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
+
+
+Troubleshooting
+---------------
+
+Client Type Annotation
+~~~~~~~~~~~~~~~~~~~~~~
+If you forget to add a type annotation for a
+:class:`~pymongo.mongo_client.MongoClient` object you may get the following
+``mypy`` error::
+
+  from pymongo import MongoClient
+  client = MongoClient()  # error: Need type annotation for "client"
+
+The solution is to annotate the type as ``client: MongoClient`` or
+``client: MongoClient[Dict[str, Any]]``. See `Basic Usage`_.
+
+Incompatible Types
+~~~~~~~~~~~~~~~~~~
+If you use the generic form of :class:`~pymongo.mongo_client.MongoClient` you
+may encounter a ``mypy`` error like::
+
+  from pymongo import MongoClient
+
+  client: MongoClient = MongoClient()
+  client.test.test.insert_many(
+      {"a": 1}
+  )  # error: Dict entry 0 has incompatible type "str": "int";
+  # expected "Mapping[str, Any]": "int"
+
+
+The solution is to use ``client: MongoClient[Dict[str, Any]]`` as used in
+`Basic Usage`_.
+
+Actual Type Errors
+~~~~~~~~~~~~~~~~~~
+
+Other times ``mypy`` will catch an actual error, as in the following code::
+
+  from pymongo import MongoClient
+  from typing import Mapping
+  client: MongoClient = MongoClient()
+  client.test.test.insert_one(
+      [{}]
+  )  # error: Argument 1 to "insert_one" of "Collection" has
+  # incompatible type "List[Dict[<nothing>, <nothing>]]";
+  # expected "Mapping[str, Any]"
+
+In this case the solution is to use ``insert_one({})``, passing a document
+instead of a list.
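+
+As another sketch of an error ``mypy`` can catch (reusing the hypothetical
+``Movie`` TypedDict from the examples above), a field given a value of the
+wrong type is flagged at the call site::
+
+  from typing import TypedDict
+  from pymongo import MongoClient
+  from pymongo.collection import Collection
+
+  class Movie(TypedDict):
+      name: str
+      year: int
+
+  client: MongoClient = MongoClient()
+  coll: Collection[Movie] = client.test.test
+  coll.insert_one(
+      Movie(name="Jurassic Park", year="1993")
+  )  # error: Argument "year" to "Movie" has incompatible type "str";
+  # expected "int"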
+
+Another example is trying to set a value on a
+:class:`~bson.raw_bson.RawBSONDocument`, which is read-only::
+
+  from bson.raw_bson import RawBSONDocument
+  from pymongo import MongoClient
+
+  client = MongoClient(document_class=RawBSONDocument)
+  coll = client.test.test
+  doc = {"my": "doc"}
+  coll.insert_one(doc)
+  retrieved = coll.find_one({"_id": doc["_id"]})
+  assert retrieved is not None
+  assert len(retrieved.raw) > 0
+  retrieved[
+      "foo"
+  ] = "bar"  # error: Unsupported target for indexed assignment
+  # ("RawBSONDocument") [index]
+
+.. _PyCharm: https://www.jetbrains.com/help/pycharm/type-hinting-in-product.html
+.. _Visual Studio Code: https://code.visualstudio.com/docs/languages/python
+.. _Sublime Text: https://github.com/sublimelsp/LSP-pyright
+.. _type hints: https://docs.python.org/3/library/typing.html
+.. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html
+.. _limitations in mypy: https://github.com/python/mypy/issues/3737
+.. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html
+.. _test_typing module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_typing.py
+.. _schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation
diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst
new file mode 100644
index 0000000000..90ec71ebe2
--- /dev/null
+++ b/doc/examples/uuid.rst
@@ -0,0 +1,512 @@
+
+.. _handling-uuid-data-example:
+
+Handling UUID Data
+==================
+
+PyMongo ships with built-in support for dealing with UUID types.
+It is straightforward to store native :class:`uuid.UUID` objects
+to MongoDB and retrieve them as native :class:`uuid.UUID` objects::
+
+  from pymongo import MongoClient
+  from bson.binary import UuidRepresentation
+  from uuid import uuid4
+
+  # use the 'standard' representation for cross-language compatibility.
+  client = MongoClient(uuidRepresentation='standard')
+  collection = client.get_database('uuid_db').get_collection('uuid_coll')
+
+  # remove all documents from collection
+  collection.delete_many({})
+
+  # create a native uuid object
+  uuid_obj = uuid4()
+
+  # save the native uuid object to MongoDB
+  collection.insert_one({'uuid': uuid_obj})
+
+  # retrieve the stored uuid object from MongoDB
+  document = collection.find_one({})
+
+  # check that the retrieved UUID matches the inserted UUID
+  assert document['uuid'] == uuid_obj
+
+Native :class:`uuid.UUID` objects can also be used as part of MongoDB
+queries::
+
+  document = collection.find_one({'uuid': uuid_obj})
+  assert document['uuid'] == uuid_obj
+
+The above examples illustrate the simplest of use-cases: one where the
+UUID is generated by, and used in, the same application. However,
+the situation can be significantly more complex when dealing with a MongoDB
+deployment that contains UUIDs created by other drivers, as the Java and C#
+drivers have historically encoded UUIDs using a byte-order that is different
+from the one used by PyMongo. Applications that require interoperability across
+these drivers must specify the appropriate
+:class:`~bson.binary.UuidRepresentation`.
+
+In the following sections, we describe how drivers have historically differed
+in their encoding of UUIDs, and how applications can use the
+:class:`~bson.binary.UuidRepresentation` configuration option to maintain
+cross-language compatibility.
+
+..
attention:: New applications that do not share a MongoDB deployment with
+  any other application and that have never stored UUIDs in MongoDB
+  should use the ``standard`` UUID representation for cross-language
+  compatibility. See :ref:`configuring-uuid-representation` for details
+  on how to configure the :class:`~bson.binary.UuidRepresentation`.
+
+.. _example-legacy-uuid:
+
+Legacy Handling of UUID Data
+----------------------------
+
+Historically, MongoDB Drivers have used different byte-ordering
+while serializing UUID types to :class:`~bson.binary.Binary`.
+Consider, for instance, a UUID with the following canonical textual
+representation::
+
+  00112233-4455-6677-8899-aabbccddeeff
+
+This UUID would historically be serialized by the Python driver as::
+
+  00112233-4455-6677-8899-aabbccddeeff
+
+The same UUID would historically be serialized by the C# driver as::
+
+  33221100-5544-7766-8899-aabbccddeeff
+
+Finally, the same UUID would historically be serialized by the Java driver
+as::
+
+  77665544-3322-1100-ffee-ddccbbaa9988
+
+.. note:: For in-depth information about the byte-order historically
+  used by different drivers, see the `Handling of Native UUID Types
+  Specification
+  `_.
+
+This difference in the byte-order of UUIDs encoded by different drivers can
+result in highly unintuitive behavior in some scenarios. We detail two such
+scenarios in the next sections.
+
+Scenario 1: Applications Share a MongoDB Deployment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Consider the following situation:
+
+* Application ``C`` written in C# generates a UUID and uses it as the ``_id``
+  of a document that it proceeds to insert into the ``uuid_test`` collection of
+  the ``example_db`` database. Let's assume that the canonical textual
+  representation of the generated UUID is::
+
+    00112233-4455-6677-8899-aabbccddeeff
+
+* Application ``P`` written in Python attempts to ``find`` the document
+  written by application ``C`` in the following manner::
+
+    from uuid import UUID
+    collection = client.example_db.uuid_test
+    result = collection.find_one({'_id': UUID('00112233-4455-6677-8899-aabbccddeeff')})
+
+  In this instance, ``result`` will never be the document that
+  was inserted by application ``C`` in the previous step. This is because of
+  the different byte-order used by the C# driver for representing UUIDs as
+  BSON Binary. The following query, on the other hand, will successfully find
+  this document::
+
+    result = collection.find_one({'_id': UUID('33221100-5544-7766-8899-aabbccddeeff')})
+
+This example demonstrates how the differing byte-order used by different
+drivers can hamper interoperability. To work around this problem, users should
+configure their ``MongoClient`` with the appropriate
+:class:`~bson.binary.UuidRepresentation` (in this case, ``client`` in
+application ``P`` can be configured to use the
+:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation to
+avoid the unintuitive behavior) as described in
+:ref:`configuring-uuid-representation`.
+
+Scenario 2: Round-Tripping UUIDs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the following examples, we see how using a misconfigured
+:class:`~bson.binary.UuidRepresentation` can cause an application
+to inadvertently change the :class:`~bson.binary.Binary` subtype, and in some
+cases, the bytes of the :class:`~bson.binary.Binary` field itself when
+round-tripping documents containing UUIDs.
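+
+Before walking through these scenarios, note that a stored UUID's actual
+:class:`~bson.binary.Binary` subtype can be inspected directly. The sketch
+below (``client`` and a previously stored document are assumed) decodes with
+``UNSPECIFIED``, described further below, so that the raw Binary and its
+``subtype`` attribute are visible::
+
+  from bson.binary import Binary, UuidRepresentation
+  from bson.codec_options import CodecOptions
+
+  # Decode UUID fields as raw Binary so the stored subtype is visible.
+  unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED)
+  coll = client.testdb.get_collection('test', codec_options=unspec_opts)
+
+  stored = coll.find_one()
+  if isinstance(stored['uuid'], Binary):
+      print(stored['uuid'].subtype)  # 3 (legacy) or 4 (standard)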
+
+Consider the following situation::
+
+  from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
+  from bson.binary import Binary, UuidRepresentation
+  from uuid import uuid4
+
+  # Using UuidRepresentation.PYTHON_LEGACY stores a Binary subtype-3 UUID
+  python_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY)
+  input_uuid = uuid4()
+  collection = client.testdb.get_collection('test', codec_options=python_opts)
+  collection.insert_one({'_id': 'foo', 'uuid': input_uuid})
+  assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})['_id'] == 'foo'
+
+  # Retrieving this document using UuidRepresentation.STANDARD returns a Binary instance
+  std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
+  std_collection = client.testdb.get_collection('test', codec_options=std_opts)
+  doc = std_collection.find_one({'_id': 'foo'})
+  assert isinstance(doc['uuid'], Binary)
+
+  # Round-tripping the retrieved document yields the exact same document
+  std_collection.replace_one({'_id': 'foo'}, doc)
+  round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})
+  assert doc == round_tripped_doc
+
+
+In this example, round-tripping the document using the incorrect
+:class:`~bson.binary.UuidRepresentation` (``STANDARD`` instead of
+``PYTHON_LEGACY``) changes the :class:`~bson.binary.Binary` subtype as a
+side-effect.
+
+.. note:: This can also happen when the situation is reversed, i.e. when the
+  original document is written using the ``STANDARD`` representation and then
+  round-tripped using the ``PYTHON_LEGACY`` representation.
+
+In the next example, we see the consequences of incorrectly using a
+representation that modifies byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``)
+when round-tripping documents::
+
+  from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
+  from bson.binary import Binary, UuidRepresentation
+  from uuid import uuid4
+
+  # Using UuidRepresentation.STANDARD stores a Binary subtype-4 UUID
+  std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
+  input_uuid = uuid4()
+  collection = client.testdb.get_collection('test', codec_options=std_opts)
+  collection.insert_one({'_id': 'baz', 'uuid': input_uuid})
+  assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)})['_id'] == 'baz'
+
+  # Retrieving this document using UuidRepresentation.JAVA_LEGACY returns a native UUID
+  # without modifying the UUID byte-order
+  java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY)
+  java_collection = client.testdb.get_collection('test', codec_options=java_opts)
+  doc = java_collection.find_one({'_id': 'baz'})
+  assert doc['uuid'] == input_uuid
+
+  # Round-tripping the retrieved document silently changes the Binary bytes and subtype
+  java_collection.replace_one({'_id': 'baz'}, doc)
+  assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None
+  assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) is None
+  round_tripped_doc = collection.find_one({'_id': 'baz'})
+  assert round_tripped_doc['uuid'] == Binary(input_uuid.bytes, 3).as_uuid(UuidRepresentation.JAVA_LEGACY)
+
+
+In this case, using the incorrect :class:`~bson.binary.UuidRepresentation`
+(``JAVA_LEGACY`` instead of ``STANDARD``) changes the
+:class:`~bson.binary.Binary` bytes and subtype as a side-effect.
+
+.. note:: This happens when any representation that
+  manipulates byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) is incorrectly
+  used to round-trip UUIDs written with ``STANDARD``.
+  When the situation is reversed, i.e. when the original document is written
+  using ``CSHARP_LEGACY`` or ``JAVA_LEGACY`` and then round-tripped using
+  ``STANDARD``, only the :class:`~bson.binary.Binary` subtype is changed.
+
+.. note:: Starting in PyMongo 4.0, these issues are resolved:
+  the ``STANDARD`` representation decodes Binary subtype 3 fields as
+  :class:`~bson.binary.Binary` objects of subtype 3 (instead of
+  :class:`uuid.UUID`), and each of the ``LEGACY_*`` representations
+  decodes Binary subtype 4 fields to :class:`~bson.binary.Binary` objects of
+  subtype 4 (instead of :class:`uuid.UUID`).
+
+.. _configuring-uuid-representation:
+
+Configuring a UUID Representation
+---------------------------------
+
+Users can work around the problems described above by configuring their
+applications with the appropriate :class:`~bson.binary.UuidRepresentation`.
+Configuring the representation modifies PyMongo's behavior while
+encoding :class:`uuid.UUID` objects to BSON and decoding
+Binary subtype 3 and 4 fields from BSON.
+
+Applications can set the UUID representation in one of the following ways:
+
+#. At the ``MongoClient`` level using the ``uuidRepresentation`` URI option,
+   e.g.::
+
+     client = MongoClient("mongodb://a:27017/?uuidRepresentation=standard")
+
+   Valid values are:
+
+   .. list-table::
+      :header-rows: 1
+
+      * - Value
+        - UUID Representation
+
+      * - ``unspecified``
+        - :ref:`unspecified-representation-details`
+
+      * - ``standard``
+        - :ref:`standard-representation-details`
+
+      * - ``pythonLegacy``
+        - :ref:`python-legacy-representation-details`
+
+      * - ``javaLegacy``
+        - :ref:`java-legacy-representation-details`
+
+      * - ``csharpLegacy``
+        - :ref:`csharp-legacy-representation-details`
+
+#. At the ``MongoClient`` level using the ``uuidRepresentation`` kwarg
+   option, e.g.::
+
+     from bson.binary import UuidRepresentation
+     client = MongoClient(uuidRepresentation=UuidRepresentation.STANDARD)
+
+#. At the ``Database`` or ``Collection`` level by supplying a suitable
+   :class:`~bson.codec_options.CodecOptions` instance, e.g.::
+
+     from bson.codec_options import CodecOptions
+     csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY)
+     java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY)
+
+     # Get database/collection from client with csharpLegacy UUID representation
+     csharp_database = client.get_database('csharp_db', codec_options=csharp_opts)
+     csharp_collection = client.testdb.get_collection('csharp_coll', codec_options=csharp_opts)
+
+     # Get database/collection from existing database/collection with javaLegacy UUID representation
+     java_database = csharp_database.with_options(codec_options=java_opts)
+     java_collection = csharp_collection.with_options(codec_options=java_opts)
+
+Supported UUID Representations
+------------------------------
+
+.. list-table::
+   :header-rows: 1
+
+   * - UUID Representation
+     - Default?
+ - Encode :class:`uuid.UUID` to + - Decode :class:`~bson.binary.Binary` subtype 4 to + - Decode :class:`~bson.binary.Binary` subtype 3 to + + * - :ref:`standard-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + - :class:`~bson.binary.Binary` subtype 3 + + * - :ref:`unspecified-representation-details` + - Yes, in PyMongo>=4 + - Raise :exc:`ValueError` + - :class:`~bson.binary.Binary` subtype 4 + - :class:`~bson.binary.Binary` subtype 3 + + * - :ref:`python-legacy-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 3 with standard byte-order + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + + * - :ref:`java-legacy-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 3 with Java legacy byte-order + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + + * - :ref:`csharp-legacy-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 3 with C# legacy byte-order + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + +We now detail the behavior and use-case for each supported UUID +representation. + +.. _unspecified-representation-details: + +``UNSPECIFIED`` +^^^^^^^^^^^^^^^ + +.. attention:: Starting in PyMongo 4.0, + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` is the default + UUID representation used by PyMongo. + +The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation +prevents the incorrect interpretation of UUID bytes by stopping short of +automatically converting UUID fields in BSON to native UUID types. Decoding +a UUID when using this representation returns a :class:`~bson.binary.Binary` +object instead. If required, users can coerce the decoded +:class:`~bson.binary.Binary` objects into native UUIDs using the +:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate +representation format. The following example shows +what this might look like for a UUID stored by the C# driver:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import Binary, UuidRepresentation + from uuid import uuid4 + + # Using UuidRepresentation.CSHARP_LEGACY + csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) + + # Store a legacy C#-formatted UUID + input_uuid = uuid4() + collection = client.testdb.get_collection('test', codec_options=csharp_opts) + collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) + + # Using UuidRepresentation.UNSPECIFIED + unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) + + # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured + document = unspec_collection.find_one({'_id': 'foo'}) + decoded_field = document['uuid'] + assert isinstance(decoded_field, Binary) + + # Binary.as_uuid() can be used to coerce the decoded value to a native UUID + decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) + assert decoded_uuid == input_uuid + +Native :class:`uuid.UUID` objects cannot directly be encoded to +:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` +and attempting to do so will result in an exception:: + + unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) + Traceback (most recent call last): + ... + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. 
UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information.
+
+Instead, applications using :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`
+must explicitly coerce a native UUID using the
+:meth:`~bson.binary.Binary.from_uuid` method::
+
+  explicit_binary = Binary.from_uuid(uuid4(), UuidRepresentation.STANDARD)
+  unspec_collection.insert_one({'_id': 'bar', 'uuid': explicit_binary})
+
+.. _standard-representation-details:
+
+``STANDARD``
+^^^^^^^^^^^^
+
+.. attention:: This UUID representation should be used by new applications or
+  applications that are encoding and/or decoding UUIDs in MongoDB for the
+  first time.
+
+The :data:`~bson.binary.UuidRepresentation.STANDARD` representation
+enables cross-language compatibility by ensuring the same byte-ordering
+when encoding UUIDs from all drivers. UUIDs written by a driver with this
+representation configured will be handled correctly by every other driver,
+provided it is also configured with the ``STANDARD`` representation.
+
+``STANDARD`` encodes native :class:`uuid.UUID` objects to
+:class:`~bson.binary.Binary` subtype 4 objects.
+
+.. _python-legacy-representation-details:
+
+``PYTHON_LEGACY``
+^^^^^^^^^^^^^^^^^
+
+.. attention:: This UUID representation should be used when reading UUIDs
+  generated by existing applications that use the Python driver
+  but **don't** explicitly set a UUID representation.
+
+.. attention:: :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`
+  was the default UUID representation in PyMongo 3.
+
+The :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` representation
+corresponds to the legacy representation of UUIDs used by PyMongo. This
+representation conforms with
+`RFC 4122 Section 4.1.2 `_.
+
+The following example illustrates the use of this representation::
+
+  from uuid import uuid4
+  from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
+  from bson.binary import Binary, UuidRepresentation
+
+  # No configured UUID representation
+  collection = client.python_legacy.get_collection('test', codec_options=DEFAULT_CODEC_OPTIONS)
+
+  # Using UuidRepresentation.PYTHON_LEGACY
+  pylegacy_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY)
+  pylegacy_collection = client.python_legacy.get_collection('test', codec_options=pylegacy_opts)
+
+  # UUIDs written by PyMongo 3 with no UuidRepresentation configured
+  # (or PyMongo 4.0 with PYTHON_LEGACY) can be queried using PYTHON_LEGACY
+  uuid_1 = uuid4()
+  pylegacy_collection.insert_one({'uuid': uuid_1})
+  document = pylegacy_collection.find_one({'uuid': uuid_1})
+
+``PYTHON_LEGACY`` encodes native :class:`uuid.UUID` objects to
+:class:`~bson.binary.Binary` subtype 3 objects, preserving the same
+byte-order as :attr:`~uuid.UUID.bytes`::
+
+  from bson.binary import Binary
+
+  document = collection.find_one({'uuid': Binary(uuid_1.bytes, subtype=3)})
+  assert document['uuid'] == uuid_1
+
+.. _java-legacy-representation-details:
+
+``JAVA_LEGACY``
+^^^^^^^^^^^^^^^
+
+.. attention:: This UUID representation should be used when reading UUIDs
+  written to MongoDB by legacy applications (i.e. applications that don't
+  use the ``STANDARD`` representation) using the Java driver.
+
+The :data:`~bson.binary.UuidRepresentation.JAVA_LEGACY` representation
+corresponds to the legacy representation of UUIDs used by the MongoDB Java
+Driver.
+
+..
note:: The ``JAVA_LEGACY`` representation reverses the order of bytes 0-7
+  and bytes 8-15.
+
+As an example, consider the same UUID described in :ref:`example-legacy-uuid`.
+Let us assume that an application used the Java driver without an explicitly
+specified UUID representation to insert the example UUID
+``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this
+value using ``PYTHON_LEGACY``, we end up with an entirely different UUID::
+
+  UUID('77665544-3322-1100-ffee-ddccbbaa9988')
+
+However, if we explicitly set the representation to
+:data:`~bson.binary.UuidRepresentation.JAVA_LEGACY`, we get the correct result::
+
+  UUID('00112233-4455-6677-8899-aabbccddeeff')
+
+PyMongo uses the specified UUID representation to reorder the BSON bytes and
+load them correctly. ``JAVA_LEGACY`` encodes native :class:`uuid.UUID` objects
+to :class:`~bson.binary.Binary` subtype 3 objects, while performing the same
+byte-reordering as the legacy Java driver's UUID to BSON encoder.
+
+.. _csharp-legacy-representation-details:
+
+``CSHARP_LEGACY``
+^^^^^^^^^^^^^^^^^
+
+.. attention:: This UUID representation should be used when reading UUIDs
+  written to MongoDB by legacy applications (i.e. applications that don't
+  use the ``STANDARD`` representation) using the C# driver.
+
+The :data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation
+corresponds to the legacy representation of UUIDs used by the MongoDB C#
+Driver.
+
+.. note:: The ``CSHARP_LEGACY`` representation reverses the order of bytes 0-3,
+  bytes 4-5, and bytes 6-7.
+
+As an example, consider the same UUID described in :ref:`example-legacy-uuid`.
+Let us assume that an application used the C# driver without an explicitly
+specified UUID representation to insert the example UUID
+``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this
+value using ``PYTHON_LEGACY``, we end up with an entirely different UUID::
+
+  UUID('33221100-5544-7766-8899-aabbccddeeff')
+
+However, if we explicitly set the representation to
+:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY`, we get the correct
+result::
+
+  UUID('00112233-4455-6677-8899-aabbccddeeff')
+
+PyMongo uses the specified UUID representation to reorder the BSON bytes and
+load them correctly. ``CSHARP_LEGACY`` encodes native :class:`uuid.UUID`
+objects to :class:`~bson.binary.Binary` subtype 3 objects, while performing
+the same byte-reordering as the legacy C# driver's UUID to BSON encoder.
diff --git a/doc/faq.rst b/doc/faq.rst
index 2f72746b40..2d211c756c 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -1,8 +1,6 @@
 Frequently Asked Questions
 ==========================
 
-.. contents::
-
 Is PyMongo thread-safe?
 -----------------------
 
@@ -38,13 +36,47 @@ created by ``fork()`` only has one thread, so any locks that were taken
 out by other threads in the parent will never be released in the child.
 The next time the child process attempts to acquire one of these locks,
 deadlock occurs.
 
+Starting in version 4.3, PyMongo utilizes :py:func:`os.register_at_fork` to
+reset its locks and other shared state in the child process after a
+:py:func:`os.fork` to reduce the frequency of deadlocks. However, deadlocks
+are still possible because libraries that PyMongo depends on, like `OpenSSL`_
+and `getaddrinfo(3)`_ (on some platforms), are not fork() safe in a
+multithreaded application.
+Linux also imposes the restriction that:
+
+  After a `fork()`_ in a multithreaded program, the child can
+  safely call only async-signal-safe functions (see
+  `signal-safety(7)`_) until such time as it calls `execve(2)`_.
+
+PyMongo relies on functions that are *not* `async-signal-safe`_, and hence
+the child process can experience deadlocks or crashes when it attempts to
+call a function that is not `async-signal-safe`_. For examples of deadlocks
+or crashes that could occur, see `PYTHON-3406`_.
+
 For a long but interesting read about the problems of Python locks in
 multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721.
 
 .. _not fork-safe: http://bugs.python.org/issue6721
+.. _OpenSSL: https://github.com/openssl/openssl/issues/19066
+.. _fork(): https://man7.org/linux/man-pages/man2/fork.2.html
+.. _signal-safety(7): https://man7.org/linux/man-pages/man7/signal-safety.7.html
+.. _async-signal-safe: https://man7.org/linux/man-pages/man7/signal-safety.7.html
+.. _execve(2): https://man7.org/linux/man-pages/man2/execve.2.html
+.. _getaddrinfo(3): https://man7.org/linux/man-pages/man3/gai_strerror.3.html
+.. _PYTHON-3406: https://jira.mongodb.org/browse/PYTHON-3406
 
 .. _connection-pooling:
 
+Can PyMongo help me load the results of my query as a Pandas ``DataFrame``?
+---------------------------------------------------------------------------
+
+While PyMongo itself does not provide any APIs for working with
+numerical or columnar data,
+`PyMongoArrow `_
+is a companion library to PyMongo that makes it easy to load MongoDB query result sets as
+`Pandas DataFrames `_,
+`NumPy ndarrays `_, or
+`Apache Arrow Tables `_.
+
 How does connection pooling work in PyMongo?
 --------------------------------------------
@@ -58,17 +90,32 @@ to 100. If there are ``maxPoolSize`` connections to a server and all are in
 use, the next request to that server will wait until one of the connections
 becomes available.
 
-The client instance opens one additional socket per server in your MongoDB
+The client instance opens two additional sockets per server in your MongoDB
 topology for monitoring the server's state.
 
-For example, a client connected to a 3-node replica set opens 3 monitoring
+For example, a client connected to a 3-node replica set opens 6 monitoring
 sockets. It also opens as many sockets as needed to support a multi-threaded
 application's concurrent operations on each server, up to ``maxPoolSize``.
 
 With a ``maxPoolSize`` of 100, if the application only uses the primary (the
 default), then only the primary connection pool grows and the total connections
-is at most 103. If the application uses a
+is at most 106. If the application uses a
 :class:`~pymongo.read_preferences.ReadPreference` to query the secondaries,
-their pools also grow and the total connections can reach 303.
+their pools also grow and the total connections can reach 306.
+
+Additionally, the pools are rate limited such that each connection pool can
+only create at most 2 connections in parallel at any time. Connection
+creation covers all the work required to set up a new connection,
+including DNS, TCP, SSL/TLS, the MongoDB handshake, and MongoDB authentication.
+For example, if three threads concurrently attempt to check out a connection
+from an empty pool, the first two threads will begin creating new connections
+while the third thread will wait. The third thread stops waiting when either:
+
+- one of the first two threads finishes creating a connection, or
+- an existing connection is checked back into the pool.
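+
+As a hedged sketch (the hostname and values here are illustrative), both the
+pool size and how long a checkout waits for a free connection can be bounded
+when constructing the client::
+
+  from pymongo import MongoClient
+
+  # At most 25 connections per server; a checkout that cannot be satisfied
+  # within 5 seconds raises a timeout error instead of waiting forever.
+  client = MongoClient(
+      "mongodb://localhost:27017",
+      maxPoolSize=25,
+      waitQueueTimeoutMS=5000,
+  )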
+ +Rate limiting concurrent connection creation reduces the likelihood of +connection storms and improves the driver's ability to reuse existing +connections. It is possible to set the minimum number of concurrent connections to each server with ``minPoolSize``, which defaults to 0. The connection pool will be @@ -77,8 +124,8 @@ network errors, causing the total number of sockets (both in use and idle) to drop below the minimum, more sockets are opened until the minimum is reached. The maximum number of milliseconds that a connection can remain idle in the -pool before being removed and replaced can be set with ``maxIdleTime``, which -defaults to `None` (no limit). +pool before being removed and replaced can be set with ``maxIdleTimeMS``, which +defaults to ``None`` (no limit). The default configuration for a :class:`~pymongo.mongo_client.MongoClient` works for most applications:: @@ -119,7 +166,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.4+ and PyPy3.5+. See the :doc:`python3` for details. +PyMongo supports CPython 3.7+ and PyPy3.8+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- @@ -148,7 +195,7 @@ instance of :class:`~bson.objectid.ObjectId`. For example:: >>> my_doc = {'x': 1} >>> collection.insert_one(my_doc) - + InsertOneResult(ObjectId('560db337fba522189f171720'), acknowledged=True) >>> my_doc {'x': 1, '_id': ObjectId('560db337fba522189f171720')} @@ -187,6 +234,9 @@ documents that already have an ``_id`` field, added by your application. Key order in subdocuments -- why does my query work in the shell but not PyMongo? --------------------------------------------------------------------------------- +.. + Note: We should rework this section now that Python 3.6+ has ordered dict. + .. testsetup:: key-order from bson.son import SON @@ -194,8 +244,7 @@ Key order in subdocuments -- why does my query work in the shell but not PyMongo collection = MongoClient().test.collection collection.drop() - collection.insert_one({'_id': 1.0, - 'subdocument': SON([('b', 1.0), ('a', 1.0)])}) + collection.insert_one({"_id": 1.0, "subdocument": SON([("b", 1.0), ("a", 1.0)])}) The key-value pairs in a BSON document can have any order (except that ``_id`` is always first). The mongo shell preserves key order when reading and writing @@ -205,9 +254,9 @@ is displayed: .. code-block:: javascript > // mongo shell. - > db.collection.insert( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) + > db.collection.insertOne( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) WriteResult({ "nInserted" : 1 }) - > db.collection.find() + > db.collection.findOne() { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } PyMongo represents BSON documents as Python dicts by default, and the order @@ -223,7 +272,7 @@ Therefore, Python dicts are not guaranteed to show keys in the order they are stored in BSON. Here, "a" is shown before "b": >>> print(collection.find_one()) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} To preserve order when reading BSON, use the :class:`~bson.son.SON` class, which is a dict that remembers its key order. 
First, get a handle to the @@ -235,12 +284,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=, - tz_aware=False, - uuid_representation=PYTHON_LEGACY, - unicode_decode_error_handler='strict', - tzinfo=None, type_registry=TypeRegistry(type_codecs=[], - fallback_encoder=None)) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversion.DATETIME) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with @@ -249,7 +293,7 @@ Now, documents and subdocuments in query results are represented with .. doctest:: key-order >>> print(collection_son.find_one()) - SON([(u'_id', 1.0), (u'subdocument', SON([(u'b', 1.0), (u'a', 1.0)]))]) + SON([('_id', 1.0), ('subdocument', SON([('b', 1.0), ('a', 1.0)]))]) The subdocument's actual storage layout is now visible: "b" is before "a". @@ -272,7 +316,7 @@ There are two solutions. First, you can match the subdocument field-by-field: >>> collection.find_one({'subdocument.a': 1.0, ... 'subdocument.b': 1.0}) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} The query matches any subdocument with an "a" of 1.0 and a "b" of 1.0, regardless of the order you specify them in Python or the order they are stored @@ -283,14 +327,14 @@ The second solution is to use a :class:`~bson.son.SON` to specify the key order: >>> query = {'subdocument': SON([('b', 1.0), ('a', 1.0)])} >>> collection.find_one(query) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} The key order you use when you create a :class:`~bson.son.SON` is preserved when it is serialized to BSON and used as a query. Thus you can create a subdocument that exactly matches the subdocument in the collection. .. seealso:: `MongoDB Manual entry on subdocument matching - `_. + `_. What does *CursorNotFound* cursor id not valid at server mean? -------------------------------------------------------------- @@ -440,20 +484,20 @@ No. PyMongo creates Python threads which `PythonAnywhere `_ does not support. For more information see `PYTHON-1495 `_. -How can I use something like Python's :mod:`json` module to encode my documents to JSON? ----------------------------------------------------------------------------------------- +How can I use something like Python's ``json`` module to encode my documents to JSON? +------------------------------------------------------------------------------------- :mod:`~bson.json_util` is PyMongo's built in, flexible tool for using Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON -`_. The +`_. The :mod:`json` module won't work out of the box with all documents from PyMongo as PyMongo supports some special types (like :class:`~bson.objectid.ObjectId` and :class:`~bson.dbref.DBRef`) that are not supported in JSON. `python-bsonjs `_ is a fast BSON to MongoDB Extended JSON converter built on top of -`libbson `_. `python-bsonjs` does not +`libbson `_. ``python-bsonjs`` does not depend on PyMongo and can offer a nice performance improvement over -:mod:`~bson.json_util`. 
`python-bsonjs` works best with PyMongo when using
+:class:`~bson.raw_bson.RawBSONDocument`.
 
 Why do I get OverflowError decoding dates stored by another language's driver?
 -------------------------------------------------------------------------------
@@ -465,9 +509,43 @@ limited to years between :data:`datetime.MINYEAR` (usually 1) and
 driver) can store BSON datetimes with year values far outside those supported
 by :class:`datetime.datetime`.
 
-There are a few ways to work around this issue. One option is to filter
-out documents with values outside of the range supported by
-:class:`datetime.datetime`::
+There are a few ways to work around this issue. Starting with PyMongo 4.3,
+:func:`bson.decode` can decode BSON datetimes in one of four ways, and the
+behavior can be specified using the ``datetime_conversion`` parameter of
+:class:`~bson.codec_options.CodecOptions`.
+
+The default option is
+:attr:`~bson.codec_options.DatetimeConversion.DATETIME`, which will
+attempt to decode as a :class:`datetime.datetime`, allowing
+:exc:`OverflowError` to occur upon out-of-range dates.
+:attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` alters
+this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when
+representations are out-of-range, while returning :class:`~datetime.datetime`
+objects as before:
+
+.. doctest::
+
+  >>> from datetime import datetime
+  >>> from bson.datetime_ms import DatetimeMS
+  >>> from bson.codec_options import DatetimeConversion
+  >>> from pymongo import MongoClient
+  >>> client = MongoClient(datetime_conversion=DatetimeConversion.DATETIME_AUTO)
+  >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)})
+  InsertOneResult(ObjectId('...'), acknowledged=True)
+  >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)})
+  InsertOneResult(ObjectId('...'), acknowledged=True)
+  >>> for x in client.db.collection.find():
+  ...     print(x)
+  ...
+  {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)}
+  {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)}
+
+For other options, please refer to
+:class:`~bson.codec_options.DatetimeConversion`.
+
+Another option that does not involve setting ``datetime_conversion`` is to
+filter out documents with values outside of the range supported by
+:class:`~datetime.datetime`:
 
 >>> from datetime import datetime
 >>> coll = client.test.dates
diff --git a/doc/index.rst b/doc/index.rst
index 3a4aa316b2..2f0ba1d36a 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -26,13 +26,16 @@ everything you need to know to use **PyMongo**.
   Using PyMongo with TLS / SSL.
 
 :doc:`examples/encryption`
-  Using PyMongo with client side encryption.
+  Using PyMongo with In-Use Encryption.
+
+:doc:`examples/type_hints`
+  Using PyMongo with type hints.
 
 :doc:`faq`
   Some questions that come up often.
 
-:doc:`migrate-to-pymongo3`
-  A PyMongo 2.x to 3.x migration guide.
+:doc:`migrate-to-pymongo4`
+  A PyMongo 3.x to 4.x migration guide.
 
 :doc:`python3`
   Frequently asked questions about python 3 support.
@@ -51,9 +54,17 @@ everything you need to know to use **PyMongo**.
 :doc:`developer/index`
   Developer guide for contributors to PyMongo.
 
+:doc:`common-issues`
+  Common issues encountered when using PyMongo.
+
 Getting Help
 ------------
-If you're having trouble or have questions about PyMongo, the best place to ask is the `MongoDB user group `_. Once you get an answer, it'd be great if you could work it back into this documentation and contribute!
+If you're having trouble or have questions about PyMongo, ask your question on +our `MongoDB Community Forum `_. +You may also want to consider a +`commercial support subscription `_. +Once you get an answer, it'd be great if you could work it back into this +documentation and contribute! Issues ------ @@ -62,6 +73,11 @@ commented on) at the main `MongoDB JIRA bug tracker `_, in the "Python Driver" project. +Feature Requests / Feedback +--------------------------- +Use our `feedback engine `_ +to send us feature requests and general feedback about PyMongo. + Contributing ------------ **PyMongo** has a large :doc:`community ` and @@ -79,14 +95,15 @@ For older versions of the documentation please see the About This Documentation ------------------------ This documentation is generated using the `Sphinx -`_ documentation generator. The source files +`_ documentation generator. The source files for the documentation are located in the *doc/* directory of the **PyMongo** distribution. To generate the docs locally run the following command from the root directory of the **PyMongo** source: .. code-block:: bash - $ python setup.py doc + $ pip install tox + $ tox -m doc Indices and tables ------------------ @@ -109,5 +126,6 @@ Indices and tables contributors changelog python3 - migrate-to-pymongo3 + migrate-to-pymongo4 developer/index + common-issues diff --git a/doc/installation.rst b/doc/installation.rst index ca702b6fb7..edbdc0ac63 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -15,85 +15,73 @@ Installing with pip We recommend using `pip `_ to install pymongo on all platforms:: - $ python -m pip install pymongo + $ python3 -m pip install pymongo To get a specific version of pymongo:: - $ python -m pip install pymongo==3.5.1 + $ python3 -m pip install pymongo==3.5.1 To upgrade using pip:: - $ python -m pip install --upgrade pymongo + $ python3 -m pip install --upgrade pymongo -.. note:: - pip does not support installing python packages in .egg format. If you would - like to install PyMongo from a .egg provided on pypi use easy_install - instead. - -Installing with easy_install ----------------------------- - -To use ``easy_install`` from -`setuptools `_ do:: - - $ python -m easy_install pymongo +Dependencies +------------ -To upgrade do:: +PyMongo supports CPython 3.7+ and PyPy3.7+. - $ python -m easy_install -U pymongo +Required dependencies +..................... -Dependencies ------------- +Support for mongodb+srv:// URIs requires `dnspython +`_ -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. +.. _optional-deps: -Optional dependencies: +Optional dependencies +..................... GSSAPI authentication requires `pykerberos `_ on Unix or `WinKerberos `_ on Windows. The correct dependency can be installed automatically along with PyMongo:: - $ python -m pip install pymongo[gssapi] + $ python3 -m pip install "pymongo[gssapi]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: +:ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws +`_:: + + $ python3 -m pip install "pymongo[aws]" - $ python -m pip install pymongo[srv] -TLS / SSL support may require `ipaddress -`_ and `certifi -`_ or `wincertstore -`_ depending on the Python -version in use. 
The necessary dependencies can be installed along with -PyMongo:: - $ python -m pip install pymongo[tls] +:ref:`OCSP` requires `PyOpenSSL +`_, `requests +`_ and `service_identity +`_:: + + $ python3 -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python -m pip install pymongo[snappy] + $ python3 -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python -m pip install pymongo[zstd] - -You can install all dependencies automatically with the following -command:: + $ python3 -m pip install "pymongo[zstd]" - $ python -m pip install pymongo[snappy,gssapi,srv,tls,zstd] +:ref:`Client-Side Field Level Encryption` requires `pymongocrypt +`_ and +`pymongo-auth-aws `_:: -Other optional packages: + $ python3 -m pip install "pymongo[encryption]" -- `backports.pbkdf2 `_, - improves authentication performance with SCRAM-SHA-1 and SCRAM-SHA-256. - It especially improves performance on Python versions older than 2.7.8. -- `monotonic `_ adds support for - a monotonic clock, which improves reliability in environments - where clock adjustments are frequent. Not needed in Python 3. +You can install all dependencies automatically with the following +command:: + $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Installing from source ---------------------- @@ -102,9 +90,9 @@ If you'd rather install directly from the source (i.e. to stay on the bleeding edge), install the C extension dependencies then check out the latest source from GitHub and install the driver from the resulting tree:: - $ git clone git://github.com/mongodb/mongo-python-driver.git pymongo + $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo $ cd pymongo/ - $ python setup.py install + $ pip install . Installing from source on Unix .............................. @@ -152,9 +140,8 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 2.7 >= 2.7.4 or Python 3.4+ downloaded from -python.org. In all cases Xcode must be installed with 'UNIX Development -Support'. +versions of Python 3.7+ downloaded from python.org. In all cases Xcode must be +installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with Xcode throws an error when it encounters compiler flags it doesn't recognize. @@ -183,25 +170,10 @@ Installing from source on Windows If you want to install PyMongo with C extensions from source the following requirements apply to both CPython and ActiveState's ActivePython: -64-bit Windows -~~~~~~~~~~~~~~ +Windows +~~~~~~~ -For Python 3.5 and newer install Visual Studio 2015. For Python 3.4 -install Visual Studio 2010. You must use the full version of Visual Studio -2010 as Visual C++ Express does not provide 64-bit compilers. Make sure that -you check the "x64 Compilers and Tools" option under Visual C++. For Python 2.7 -install the `Microsoft Visual C++ Compiler for Python 2.7`_. - -32-bit Windows -~~~~~~~~~~~~~~ - -For Python 3.5 and newer install Visual Studio 2015. - -For Python 3.4 install Visual C++ 2010 Express. - -For Python 2.7 install the `Microsoft Visual C++ Compiler for Python 2.7`_ - -.. _`Microsoft Visual C++ Compiler for Python 2.7`: https://www.microsoft.com/en-us/download/details.aspx?id=44266 +Install Visual Studio 2015+. .. _install-no-c: @@ -215,36 +187,9 @@ warning will be printed. 
If you wish to install PyMongo without the C extensions, even if the extensions build properly, it can be done using a command line option to -*setup.py*:: - - $ python setup.py --no_ext install - -Building PyMongo egg Packages ------------------------------ - -Some organizations do not allow compilers and other build tools on production -systems. To install PyMongo on these systems with C extensions you may need to -build custom egg packages. Make sure that you have installed the dependencies -listed above for your operating system then run the following command in the -PyMongo source directory:: +*pip install*:: - $ python setup.py bdist_egg - -The egg package can be found in the dist/ subdirectory. The file name will -resemble “pymongo-3.6-py2.7-linux-x86_64.egg” but may have a different name -depending on your platform and the version of python you use to compile. - -.. warning:: - - These “binary distributions,” will only work on systems that resemble the - environment on which you built the package. In other words, ensure that - operating systems and versions of Python and architecture (i.e. “32” or “64” - bit) match. - -Copy this file to the target system and issue the following command to install the -package:: - - $ sudo python -m easy_install pymongo-3.6-py2.7-linux-x86_64.egg + $ NO_EXT=1 python -m pip install . Installing a beta or release candidate -------------------------------------- @@ -255,8 +200,4 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.9.0b1.tar.gz - -or easy_install:: - - $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.9.0b1.tar.gz + $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/4.4.0b0.tar.gz diff --git a/doc/make.bat b/doc/make.bat index 4ccc1590eb..2119f51099 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,113 +1,35 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -set SPHINXBUILD=sphinx-build -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - echo. 
- echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyMongo.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyMongo.ghc - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst deleted file mode 100644 index bb396ddee0..0000000000 --- a/doc/migrate-to-pymongo3.rst +++ /dev/null @@ -1,546 +0,0 @@ -PyMongo 3 Migration Guide -========================= - -.. contents:: - -.. testsetup:: - - from pymongo import MongoClient, ReadPreference - client = MongoClient() - collection = client.my_database.my_collection - -PyMongo 3 is a partial rewrite bringing a large number of improvements. It -also brings a number of backward breaking changes. This guide provides a -roadmap for migrating an existing application from PyMongo 2.x to 3.x or -writing libraries that will work with both PyMongo 2.x and 3.x. - -PyMongo 2.9 ------------ - -The first step in any successful migration involves upgrading to, or -requiring, at least PyMongo 2.9. If your project has a -requirements.txt file, add the line "pymongo >= 2.9, < 3.0" until you have -completely migrated to PyMongo 3. Most of the key new -methods and options from PyMongo 3.0 are backported in PyMongo 2.9 making -migration much easier. 
- -Enable Deprecation Warnings ---------------------------- - -Starting with PyMongo 2.9, :exc:`DeprecationWarning` is raised by most methods -removed in PyMongo 3.0. Make sure you enable runtime warnings to see -where deprecated functions and methods are being used in your application:: - - python -Wd - -Warnings can also be changed to errors:: - - python -Wd -Werror - -.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when - used. For example, the :meth:`~pymongo.collection.Collection.find` options - renamed in PyMongo 3.0 do not raise :exc:`DeprecationWarning` when used in - PyMongo 2.x. See also `Removed features with no migration path`_. - -CRUD API --------- - -Changes to find() and find_one() -................................ - -"spec" renamed "filter" -~~~~~~~~~~~~~~~~~~~~~~~ - -The `spec` option has been renamed to `filter`. Code like this:: - - >>> cursor = collection.find(spec={"a": 1}) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find(filter={"a": 1}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}) - -"fields" renamed "projection" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `fields` option has been renamed to `projection`. Code like this:: - - >>> cursor = collection.find({"a": 1}, fields={"_id": False}) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, projection={"_id": False}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, {"_id": False}) - -"partial" renamed "allow_partial_results" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `partial` option has been renamed to `allow_partial_results`. Code like -this:: - - >>> cursor = collection.find({"a": 1}, partial=True) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, allow_partial_results=True) - -"timeout" replaced by "no_cursor_timeout" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `timeout` option has been replaced by `no_cursor_timeout`. Code like this:: - - >>> cursor = collection.find({"a": 1}, timeout=False) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, no_cursor_timeout=True) - -"network_timeout" is removed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `network_timeout` option has been removed. This option was always the -wrong solution for timing out long running queries and should never be used -in production. Starting with **MongoDB 2.6** you can use the $maxTimeMS query -modifier. Code like this:: - - # Set a 5 second select() timeout. - >>> cursor = collection.find({"a": 1}, network_timeout=5) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - # Set a 5 second (5000 millisecond) server side query timeout. - >>> cursor = collection.find({"a": 1}, modifiers={"$maxTimeMS": 5000}) - -or with PyMongo 3.5 or later: - - >>> cursor = collection.find({"a": 1}, max_time_ms=5000) - -or with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"$query": {"a": 1}, "$maxTimeMS": 5000}) - -.. seealso:: `$maxTimeMS - `_ - -Tailable cursors -~~~~~~~~~~~~~~~~ - -The `tailable` and `await_data` options have been replaced by `cursor_type`. -Code like this:: - - >>> cursor = collection.find({"a": 1}, tailable=True) - >>> cursor = collection.find({"a": 1}, tailable=True, await_data=True) - -can be changed to this with PyMongo 2.9 or later: - -.. 
doctest:: - - >>> from pymongo import CursorType - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE) - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE_AWAIT) - -Other removed options -~~~~~~~~~~~~~~~~~~~~~ - -The `slave_okay`, `read_preference`, `tag_sets`, -and `secondary_acceptable_latency_ms` options have been removed. See the `Read -Preferences`_ section for solutions. - -The aggregate method always returns a cursor -............................................ - -PyMongo 2.6 added an option to return an iterable cursor from -:meth:`~pymongo.collection.Collection.aggregate`. In PyMongo 3 -:meth:`~pymongo.collection.Collection.aggregate` always returns a cursor. Use -the `cursor` option for consistent behavior with PyMongo 2.9 and later: - -.. doctest:: - - >>> for result in collection.aggregate([], cursor={}): - ... pass - -Read Preferences ----------------- - -The "slave_okay" option is removed -.................................. - -The `slave_okay` option is removed from PyMongo's API. The -secondaryPreferred read preference provides the same behavior. -Code like this:: - - >>> client = MongoClient(slave_okay=True) - -can be changed to this with PyMongo 2.9 or newer: - -.. doctest:: - - >>> client = MongoClient(readPreference="secondaryPreferred") - -The "read_preference" attribute is immutable -............................................ - -Code like this:: - - >>> from pymongo import ReadPreference - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=ReadPreference.SECONDARY) - -Code like this:: - - >>> cursor = collection.find({"a": 1}, - ... read_preference=ReadPreference.SECONDARY) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> coll2 = collection.with_options(read_preference=ReadPreference.SECONDARY) - >>> cursor = coll2.find({"a": 1}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "tag_sets" option and attribute are removed -............................................... - -The `tag_sets` MongoClient option is removed. The `read_preference` -option can be used instead. Code like this:: - - >>> client = MongoClient( - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}, {"dc": "sf"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> client = MongoClient(read_preference=Secondary([{"dc": "ny"}])) - -To change the tags sets for a Database or Collection, code like this:: - - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - >>> db.tag_sets = [{"dc": "ny"}] - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=Secondary([{"dc": "ny"}])) - -Code like this:: - - >>> cursor = collection.find( - ... {"a": 1}, - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> coll2 = collection.with_options( - ... read_preference=Secondary([{"dc": "ny"}])) - >>> cursor = coll2.find({"a": 1}) - -.. 
seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "secondary_acceptable_latency_ms" option and attribute are removed -...................................................................... - -PyMongo 2.x supports `secondary_acceptable_latency_ms` as an option to methods -throughout the driver, but mongos only supports a global latency option. -PyMongo 3.x has changed to match the behavior of mongos, allowing migration -from a single server, to a replica set, to a sharded cluster without a -surprising change in server selection behavior. A new option, -`localThresholdMS`, is available through MongoClient and should be used in -place of `secondaryAcceptableLatencyMS`. Code like this:: - - >>> client = MongoClient(readPreference="nearest", - ... secondaryAcceptableLatencyMS=100) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(readPreference="nearest", - ... localThresholdMS=100) - -Write Concern -------------- - -The "safe" option is removed -............................ - -In PyMongo 3 the `safe` option is removed from the entire API. -:class:`~pymongo.mongo_client.MongoClient` has always defaulted to acknowledged -write operations and continues to do so in PyMongo 3. - -The "write_concern" attribute is immutable -.......................................... - -The `write_concern` attribute is immutable in PyMongo 3. Code like this:: - - >>> client = MongoClient() - >>> client.write_concern = {"w": "majority"} - -can be changed to this with any version of PyMongo: - -.. doctest:: - - >>> client = MongoClient(w="majority") - -Code like this:: - - >>> db = client.my_database - >>> db.write_concern = {"w": "majority"} - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> db = client.get_database("my_database", - ... write_concern=WriteConcern(w="majority")) - -The new CRUD API write methods do not accept write concern options. Code like -this:: - - >>> oid = collection.insert({"a": 2}, w="majority") - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> coll2 = collection.with_options( - ... write_concern=WriteConcern(w="majority")) - >>> oid = coll2.insert({"a": 2}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -Codec Options -------------- - -The "document_class" attribute is removed -......................................... - -Code like this:: - - >>> from bson.son import SON - >>> client = MongoClient() - >>> client.document_class = SON - -can be replaced by this in any version of PyMongo: - -.. doctest:: - - >>> from bson.son import SON - >>> client = MongoClient(document_class=SON) - -or to change the `document_class` for a :class:`~pymongo.database.Database` -with PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson.codec_options import CodecOptions - >>> from bson.son import SON - >>> db = client.get_database("my_database", CodecOptions(SON)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -The "uuid_subtype" option and attribute are removed -................................................... - -Code like this:: - - >>> from bson.binary import JAVA_LEGACY - >>> db = client.my_database - >>> db.uuid_subtype = JAVA_LEGACY - -can be replaced by this with PyMongo 2.9 or later: - -.. 
doctest:: - - >>> from bson.binary import JAVA_LEGACY - >>> from bson.codec_options import CodecOptions - >>> db = client.get_database("my_database", - ... CodecOptions(uuid_representation=JAVA_LEGACY)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -MongoClient ------------ - -MongoClient connects asynchronously -................................... - -In PyMongo 3, the :class:`~pymongo.mongo_client.MongoClient` constructor no -longer blocks while connecting to the server or servers, and it no longer -raises :exc:`~pymongo.errors.ConnectionFailure` if they are unavailable, nor -:exc:`~pymongo.errors.ConfigurationError` if the user’s credentials are wrong. -Instead, the constructor returns immediately and launches the connection -process on background threads. The `connect` option is added to control whether -these threads are started immediately, or when the client is first used. - -For consistent behavior in PyMongo 2.x and PyMongo 3.x, code like this:: - - >>> from pymongo.errors import ConnectionFailure - >>> try: - ... client = MongoClient() - ... except ConnectionFailure: - ... print("Server not available") - >>> - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.errors import ConnectionFailure - >>> client = MongoClient(connect=False) - >>> try: - ... result = client.admin.command("ismaster") - ... except ConnectionFailure: - ... print("Server not available") - >>> - -Any operation can be used to determine if the server is available. We choose -the "ismaster" command here because it is cheap and does not require auth, so -it is a simple way to check whether the server is available. - -The max_pool_size parameter is removed -...................................... - -PyMongo 3 replaced the max_pool_size parameter with support for the MongoDB URI -`maxPoolSize` option. Code like this:: - - >>> client = MongoClient(max_pool_size=10) - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(maxPoolSize=10) - >>> client = MongoClient("mongodb://localhost:27017/?maxPoolSize=10") - -The "disconnect" method is removed -.................................. - -Code like this:: - - >>> client.disconnect() - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client.close() - -The host and port attributes are removed -........................................ - -Code like this:: - - >>> host = client.host - >>> port = client.port - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> address = client.address - >>> host, port = address or (None, None) - -BSON ----- - -"as_class", "tz_aware", and "uuid_subtype" are removed -...................................................... - -The `as_class`, `tz_aware`, and `uuid_subtype` parameters have been -removed from the functions provided in :mod:`bson`. Furthermore, the -:func:`~bson.encode` and :func:`~bson.decode` functions have been added -as more performant alternatives to the :meth:`bson.BSON.encode` and -:meth:`bson.BSON.decode` methods. Code like this:: - - >>> from bson import BSON - >>> from bson.son import SON - >>> encoded = BSON.encode({"a": 1}, as_class=SON) - -can be replaced by this in PyMongo 2.9 or later: - -.. 
doctest::
-
-  >>> from bson import encode
-  >>> from bson.codec_options import CodecOptions
-  >>> from bson.son import SON
-  >>> encoded = encode({"a": 1}, codec_options=CodecOptions(SON))
-
-Removed features with no migration path
----------------------------------------
-
-MasterSlaveConnection is removed
-................................
-
-Master slave deployments are deprecated in MongoDB. Starting with MongoDB 3.0
-a replica set can have up to 50 members and that limit is likely to be
-removed in later releases. We recommend migrating to replica sets instead.
-
-Requests are removed
-....................
-
-The client methods `start_request`, `in_request`, and `end_request` are
-removed. Requests were designed to make read-your-writes consistency more
-likely with the w=0 write concern. Additionally, a thread in a request used the
-same member for all secondary reads in a replica set. To ensure
-read-your-writes consistency in PyMongo 3.0, do not override the default write
-concern with w=0, and do not override the default read preference of PRIMARY.
-
-The "compile_re" option is removed
-..................................
-
-In PyMongo 3 regular expressions are never compiled to Python match objects.
-
-The "use_greenlets" option is removed
-.....................................
-
-The `use_greenlets` option was meant to allow use of PyMongo with Gevent
-without the use of gevent.monkey.patch_threads(). This option caused a lot
-of confusion and made it difficult to support alternative asyncio libraries
-like Eventlet. Users of Gevent should use gevent.monkey.patch_all() instead.
-
-.. seealso:: :doc:`examples/gevent`
diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst
new file mode 100644
index 0000000000..35fc922d51
--- /dev/null
+++ b/doc/migrate-to-pymongo4.rst
@@ -0,0 +1,999 @@
+.. _pymongo4-migration-guide:
+
+PyMongo 4 Migration Guide
+=========================
+
+.. testsetup::
+
+  from pymongo import MongoClient, ReadPreference
+
+  client = MongoClient()
+  database = client.my_database
+  collection = database.my_collection
+
+PyMongo 4.0 brings a number of improvements as well as some backward breaking
+changes. This guide provides a roadmap for migrating an existing application
+from PyMongo 3.x to 4.x or writing libraries that will work with both
+PyMongo 3.x and 4.x.
+
+PyMongo 3
+---------
+
+The first step in any successful migration involves upgrading to, or
+requiring, at least the latest version of PyMongo 3.x. If your project has a
+requirements.txt file, add the line "pymongo >= 3.12, < 4.0" until you have
+completely migrated to PyMongo 4. Most of the key new methods and options from
+PyMongo 4.0 are backported in PyMongo 3.12 making migration much easier.
+
+.. note:: Users of PyMongo 2.x who wish to upgrade to 4.x must first upgrade
+  to PyMongo 3.x by following the `PyMongo 3 Migration Guide
+  `_.
+
+Python 3.6+
+-----------
+
+PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to
+upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from
+Python 2 should consult the :doc:`python3`.
+
+Enable Deprecation Warnings
+---------------------------
+
+:exc:`DeprecationWarning` is raised by most methods removed in PyMongo 4.0.
+Make sure you enable runtime warnings to see where deprecated functions and
+methods are being used in your application::
+
+  python -Wd
+
+Warnings can also be changed to errors::
+
+  python -Wd -Werror
+
+.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when
+  used.
See `Removed features with no migration path`_.
+
+MongoReplicaSetClient
+---------------------
+
+Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`.
+Since PyMongo 3.0, ``MongoReplicaSetClient`` has been identical to
+:class:`pymongo.mongo_client.MongoClient`. Applications can simply replace
+``MongoReplicaSetClient`` with :class:`pymongo.mongo_client.MongoClient` and
+get the same behavior.
+
+MongoClient
+-----------
+
+.. _pymongo4-migration-direct-connection:
+
+``directConnection`` defaults to False
+......................................
+
+The ``directConnection`` URI option and keyword argument to
+:class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of
+``None``, allowing for the automatic discovery of replica sets. This means
+that if you want a direct connection to a single server you must pass
+``directConnection=True`` as a URI option or keyword argument.
+
+If you see any :exc:`~pymongo.errors.ServerSelectionTimeoutError` errors after upgrading from PyMongo 3 to 4.x, you likely
+need to add ``directConnection=True`` when creating the client.
+Here are some example errors:
+
+.. code-block::
+
+  pymongo.errors.ServerSelectionTimeoutError: mongo_node2: [Errno 8] nodename nor servname
+  provided, or not known,mongo_node1:27017
+
+.. code-block::
+
+  ServerSelectionTimeoutError: No servers match selector "Primary()", Timeout: 30s,
+  Topology Description: ...
+
+
+Additionally, the "isWritablePrimary" attribute of a hello command sent back by the server will
+always be ``True`` if ``directConnection=False``::
+
+  >>> client.admin.command('hello')['isWritablePrimary']
+  True
+
+
+The waitQueueMultiple parameter is removed
+..........................................
+
+Removed the ``waitQueueMultiple`` keyword argument to
+:class:`~pymongo.mongo_client.MongoClient` and removed
+:exc:`pymongo.errors.ExceededMaxWaiters`. Instead of using
+``waitQueueMultiple`` to bound queuing, limit the size of the thread
+pool in your application.
+
+The socketKeepAlive parameter is removed
+..........................................
+
+Removed the ``socketKeepAlive`` keyword argument to
+:class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP
+keepalive. For more information see the `documentation `_.
+
+Renamed URI options
+...................
+
+Several deprecated URI options have been renamed to the standardized
+option names defined in the
+`URI options specification `_.
+The old option names and their renamed equivalents are summarized in the table
+below. Some renamed options have different semantics from the option being
+replaced as noted in the 'Migration Notes' column.
+ ++--------------------+-------------------------------+--------------------------------------------------------+ +| Old URI Option | Renamed URI Option | Migration Notes | ++====================+===============================+========================================================+ +| ssl_pem_passphrase | tlsCertificateKeyFilePassword | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_ca_certs | tlsCAFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_crlfile | tlsCRLFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_match_hostname | tlsAllowInvalidHostnames | ``ssl_match_hostname=True`` is equivalent to | +| | | ``tlsAllowInvalidHostnames=False`` and vice-versa. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_cert_reqs | tlsAllowInvalidCertificates | Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` | +| | | and ``ssl.CERT_REQUIRED``, the new option expects | +| | | a boolean value - ``True`` is equivalent to | +| | | ``ssl.CERT_NONE``, while ``False`` is equivalent to | +| | | ``ssl.CERT_REQUIRED``. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_certfile | tlsCertificateKeyFile | Instead of using ``ssl_certfile`` and ``ssl_keyfile`` | +| | | to specify the certificate and private key files | ++--------------------+ | respectively, use ``tlsCertificateKeyFile`` to pass | +| ssl_keyfile | | a single file containing both the client certificate | +| | | and the private key. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| j | journal | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| wtimeout | wTimeoutMS | - | ++--------------------+-------------------------------+--------------------------------------------------------+ + +MongoClient.fsync is removed +............................ + +Removed :meth:`pymongo.mongo_client.MongoClient.fsync`. Run the +`fsync command`_ directly with :meth:`~pymongo.database.Database.command` +instead. For example:: + + client.admin.command('fsync', lock=True) + +.. _fsync command: https://mongodb.com/docs/manual/reference/command/fsync/ + +MongoClient.unlock is removed +............................. + +Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Run the +`fsyncUnlock command`_ directly with +:meth:`~pymongo.database.Database.command` instead. For example:: + + client.admin.command('fsyncUnlock') + +.. _fsyncUnlock command: https://mongodb.com/docs/manual/reference/command/fsyncUnlock/ + +MongoClient.is_locked is removed +................................ + +Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Run the +`currentOp command`_ directly with +:meth:`~pymongo.database.Database.command` instead. For example:: + + is_locked = client.admin.command('currentOp').get('fsyncLock') + +.. _currentOp command: https://mongodb.com/docs/manual/reference/command/currentOp/ + +MongoClient.database_names is removed +..................................... + +Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. Use +:meth:`~pymongo.mongo_client.MongoClient.list_database_names` instead. 
Code like
+this::
+
+  names = client.database_names()
+
+can be changed to this::
+
+  names = client.list_database_names()
+
+MongoClient.max_bson_size/max_message_size/max_write_batch_size are removed
+...........................................................................
+
+Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`,
+:attr:`pymongo.mongo_client.MongoClient.max_message_size`, and
+:attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers
+were incorrect in ``loadBalanced=true`` mode and ambiguous in clusters
+with mixed versions. Use the `hello command`_ to get the authoritative
+value from the remote server instead. Code like this::
+
+  max_bson_size = client.max_bson_size
+  max_message_size = client.max_message_size
+  max_write_batch_size = client.max_write_batch_size
+
+can be changed to this::
+
+  doc = client.admin.command('hello')
+  max_bson_size = doc['maxBsonObjectSize']
+  max_message_size = doc['maxMessageSizeBytes']
+  max_write_batch_size = doc['maxWriteBatchSize']
+
+.. _hello command: https://mongodb.com/docs/manual/reference/command/hello/
+
+MongoClient.event_listeners and other configuration option helpers are removed
+..............................................................................
+
+The following client configuration option helpers are removed:
+
+- :attr:`pymongo.mongo_client.MongoClient.event_listeners`.
+- :attr:`pymongo.mongo_client.MongoClient.max_pool_size`.
+- :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`.
+- :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`.
+- :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`.
+- :attr:`pymongo.mongo_client.MongoClient.retry_writes`.
+- :attr:`pymongo.mongo_client.MongoClient.retry_reads`.
+
+These helpers have been replaced by
+:attr:`pymongo.mongo_client.MongoClient.options`. Code like this::
+
+  client.event_listeners
+  client.local_threshold_ms
+  client.server_selection_timeout
+  client.max_pool_size
+  client.min_pool_size
+  client.max_idle_time_ms
+
+can be changed to this::
+
+  client.options.event_listeners
+  client.options.local_threshold_ms
+  client.options.server_selection_timeout
+  client.options.pool_options.max_pool_size
+  client.options.pool_options.min_pool_size
+  client.options.pool_options.max_idle_time_seconds
+
+.. _tz_aware_default_change:
+
+``tz_aware`` defaults to ``False``
+..................................
+
+The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions`
+now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads`
+now decodes datetimes as naive by default::
+
+  >>> from bson import json_util
+  >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}'
+  >>> json_util.loads(s)
+  {'dt': datetime.datetime(2022, 5, 9, 17, 54)}
+
+To retain the PyMongo 3 behavior set ``tz_aware=True``, for example::
+
+  >>> from bson import json_util
+  >>> opts = json_util.JSONOptions(tz_aware=True)
+  >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}'
+  >>> json_util.loads(s, json_options=opts)
+  {'dt': datetime.datetime(2022, 5, 9, 17, 54, tzinfo=<bson.tz_util.FixedOffset object at ...>)}
+
+This change was made to match the default behavior of
+:class:`~bson.codec_options.CodecOptions` and :class:`bson.decode`.
+
+MongoClient cannot execute operations after ``close()``
+.......................................................
+
+:class:`~pymongo.mongo_client.MongoClient` cannot execute any operations
+after being closed. Previously the client would simply reconnect; now you
+must create a new instance.
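+
+For example, a minimal sketch of the change (in PyMongo 4 the commented-out
+command is expected to raise :exc:`~pymongo.errors.InvalidOperation` rather
+than reconnect)::
+
+    from pymongo import MongoClient
+
+    client = MongoClient()
+    client.admin.command('ping')
+    client.close()
+    # client.admin.command('ping')  # PyMongo 3 reconnected; PyMongo 4 raises.
+    client = MongoClient()  # Instead, create a new client and use that.
+    client.admin.command('ping')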
+
+MongoClient raises exception when given more than one URI
+.........................................................
+
+:class:`~pymongo.mongo_client.MongoClient` now raises a :exc:`~pymongo.errors.ConfigurationError`
+when more than one URI is passed into the ``hosts`` argument.
+
+MongoClient raises exception when given unescaped percent sign in login info
+............................................................................
+
+:class:`~pymongo.mongo_client.MongoClient` now raises an
+:exc:`~pymongo.errors.InvalidURI` exception
+when it encounters unescaped percent signs in username and password.
+
+Database
+--------
+
+Database.authenticate and Database.logout are removed
+.....................................................
+
+Removed :meth:`pymongo.database.Database.authenticate` and
+:meth:`pymongo.database.Database.logout`. Authenticating multiple users
+on the same client conflicts with support for logical sessions in MongoDB 3.6+.
+To authenticate as multiple users, create multiple instances of
+:class:`~pymongo.mongo_client.MongoClient`. Code like this::
+
+  client = MongoClient()
+  client.admin.authenticate('user1', 'pass1')
+  client.admin.authenticate('user2', 'pass2')
+
+can be changed to this::
+
+  client1 = MongoClient(username='user1', password='pass1')
+  client2 = MongoClient(username='user2', password='pass2')
+
+Alternatively, create a single user that contains all the authentication privileges
+required by your application.
+
+Database.collection_names is removed
+....................................
+
+Removed :meth:`pymongo.database.Database.collection_names`. Use
+:meth:`~pymongo.database.Database.list_collection_names` instead. Code like
+this::
+
+  names = db.collection_names()
+  non_system_names = db.collection_names(include_system_collections=False)
+
+can be changed to this::
+
+  names = db.list_collection_names()
+  non_system_names = db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}})
+
+Database.current_op is removed
+..............................
+
+Removed :meth:`pymongo.database.Database.current_op`. Use
+:meth:`~pymongo.database.Database.aggregate` instead with the
+`$currentOp aggregation pipeline stage`_. Code like
+this::
+
+  ops = client.admin.current_op()['inprog']
+
+can be changed to this::
+
+  ops = list(client.admin.aggregate([{'$currentOp': {}}]))
+
+.. _$currentOp aggregation pipeline stage: https://mongodb.com/docs/manual/reference/operator/aggregation/currentOp/
+
+Database.add_user is removed
+............................
+
+Removed :meth:`pymongo.database.Database.add_user` which was deprecated in
+PyMongo 3.6. Use the `createUser command`_ or `updateUser command`_ instead.
+To create a user::
+
+  db.command("createUser", "admin", pwd="password", roles=["dbAdmin"])
+
+To create a read-only user::
+
+  db.command("createUser", "user", pwd="password", roles=["read"])
+
+To change a password::
+
+  db.command("updateUser", "user", pwd="newpassword")
+
+Or change roles::
+
+  db.command("updateUser", "user", roles=["readWrite"])
+
+.. _createUser command: https://mongodb.com/docs/manual/reference/command/createUser/
+.. _updateUser command: https://mongodb.com/docs/manual/reference/command/updateUser/
+
+Database.remove_user is removed
+...............................
+
+Removed :meth:`pymongo.database.Database.remove_user` which was deprecated in
+PyMongo 3.6. Use the `dropUser command`_ instead::
+
+  db.command("dropUser", "user")
+
+..
_dropUser command: https://mongodb.com/docs/manual/reference/command/dropUser/
+
+Database.profiling_level is removed
+...................................
+
+Removed :meth:`pymongo.database.Database.profiling_level` which was deprecated in
+PyMongo 3.12. Use the `profile command`_ instead. Code like this::
+
+  level = db.profiling_level()
+
+Can be changed to this::
+
+  profile = db.command('profile', -1)
+  level = profile['was']
+
+.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/
+
+Database.set_profiling_level is removed
+.......................................
+
+Removed :meth:`pymongo.database.Database.set_profiling_level` which was deprecated in
+PyMongo 3.12. Use the `profile command`_ instead. Code like this::
+
+  db.set_profiling_level(pymongo.ALL, filter={'op': 'query'})
+
+Can be changed to this::
+
+  res = db.command('profile', 2, filter={'op': 'query'})
+
+Database.profiling_info is removed
+..................................
+
+Removed :meth:`pymongo.database.Database.profiling_info` which was deprecated in
+PyMongo 3.12. Query the `'system.profile' collection`_ instead. Code like this::
+
+  profiling_info = db.profiling_info()
+
+Can be changed to this::
+
+  profiling_info = list(db['system.profile'].find())
+
+.. _'system.profile' collection: https://mongodb.com/docs/manual/reference/database-profiler/
+
+Database.__bool__ raises NotImplementedError
+............................................
+:class:`~pymongo.database.Database` now raises an error upon evaluating as a
+Boolean. Code like this::
+
+  if database:
+
+Can be changed to this::
+
+  if database is not None:
+
+You must now explicitly compare with None.
+
+Collection
+----------
+
+The useCursor option for Collection.aggregate is removed
+........................................................
+
+Removed the ``useCursor`` option for
+:meth:`~pymongo.collection.Collection.aggregate` which was deprecated in
+PyMongo 3.6. The option was only necessary when upgrading from MongoDB 2.4
+to MongoDB 2.6.
+
+Collection.insert is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.insert`. Use
+:meth:`~pymongo.collection.Collection.insert_one` or
+:meth:`~pymongo.collection.Collection.insert_many` instead.
+
+Code like this::
+
+  collection.insert({'doc': 1})
+  collection.insert([{'doc': 2}, {'doc': 3}])
+
+Can be changed to this::
+
+  collection.insert_one({'doc': 1})
+  collection.insert_many([{'doc': 2}, {'doc': 3}])
+
+Collection.save is removed
+..........................
+
+Removed :meth:`pymongo.collection.Collection.save`. Applications will
+get better performance using :meth:`~pymongo.collection.Collection.insert_one`
+to insert a new document and :meth:`~pymongo.collection.Collection.update_one`
+to update an existing document. Code like this::
+
+  doc = collection.find_one({"_id": "some id"})
+  doc["some field"] = <the new value>
+  collection.save(doc)
+
+Can be changed to this::
+
+  result = collection.update_one({"_id": "some id"}, {"$set": {"some field": <the new value>}})
+
+If performance is not a concern and refactoring is untenable, ``save`` can be
+implemented like so::
+
+  def save(doc):
+      if '_id' in doc:
+          collection.replace_one({'_id': doc['_id']}, doc, upsert=True)
+          return doc['_id']
+      else:
+          res = collection.insert_one(doc)
+          return res.inserted_id
+
+Collection.update is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.update`.
Use
+:meth:`~pymongo.collection.Collection.update_one`
+to update a single document or
+:meth:`~pymongo.collection.Collection.update_many` to update multiple
+documents. Code like this::
+
+  collection.update({}, {'$set': {'a': 1}})
+  collection.update({}, {'$set': {'b': 1}}, multi=True)
+
+Can be changed to this::
+
+  collection.update_one({}, {'$set': {'a': 1}})
+  collection.update_many({}, {'$set': {'b': 1}})
+
+Collection.remove is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.remove`. Use
+:meth:`~pymongo.collection.Collection.delete_one`
+to delete a single document or
+:meth:`~pymongo.collection.Collection.delete_many` to delete multiple
+documents. Code like this::
+
+  collection.remove({'a': 1}, multi=False)
+  collection.remove({'b': 1})
+
+Can be changed to this::
+
+  collection.delete_one({'a': 1})
+  collection.delete_many({'b': 1})
+
+Collection.find_and_modify is removed
+.....................................
+
+Removed :meth:`pymongo.collection.Collection.find_and_modify`. Use
+:meth:`~pymongo.collection.Collection.find_one_and_update`,
+:meth:`~pymongo.collection.Collection.find_one_and_replace`, or
+:meth:`~pymongo.collection.Collection.find_one_and_delete` instead.
+Code like this::
+
+  updated_doc = collection.find_and_modify({'a': 1}, {'$set': {'b': 1}})
+  replaced_doc = collection.find_and_modify({'b': 1}, {'c': 1})
+  deleted_doc = collection.find_and_modify({'c': 1}, remove=True)
+
+Can be changed to this::
+
+  updated_doc = collection.find_one_and_update({'a': 1}, {'$set': {'b': 1}})
+  replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1})
+  deleted_doc = collection.find_one_and_delete({'c': 1})
+
+Collection.count and Cursor.count are removed
+.............................................
+
+Removed :meth:`pymongo.collection.Collection.count` and
+:meth:`pymongo.cursor.Cursor.count`. Use
+:meth:`~pymongo.collection.Collection.count_documents` or
+:meth:`~pymongo.collection.Collection.estimated_document_count` instead.
+Code like this::
+
+  ntotal = collection.count({})
+  nmatched = collection.count({'price': {'$gte': 10}})
+  # Or via the Cursor.count API:
+  ntotal = collection.find({}).count()
+  nmatched = collection.find({'price': {'$gte': 10}}).count()
+
+Can be changed to this::
+
+  ntotal = collection.estimated_document_count()
+  nmatched = collection.count_documents({'price': {'$gte': 10}})
+
+.. note:: When migrating from :meth:`count` to :meth:`count_documents`
+  the following query operators must be replaced:
+
+  +-------------+------------------------------------------------------------------+
+  | Operator    | Replacement                                                      |
+  +=============+==================================================================+
+  | $where      | `$expr`_                                                         |
+  +-------------+------------------------------------------------------------------+
+  | $near       | `$geoWithin`_ with `$center`_; i.e.                              |
+  |             | ``{'$geoWithin': {'$center': [[<x>, <y>], <radius>]}}``          |
+  +-------------+------------------------------------------------------------------+
+  | $nearSphere | `$geoWithin`_ with `$centerSphere`_; i.e.                        |
+  |             | ``{'$geoWithin': {'$centerSphere': [[<x>, <y>], <radius>]}}``    |
+  +-------------+------------------------------------------------------------------+
+
+.. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/
+.. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/
+.. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/
+..
_$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/
+
+Collection.initialize_ordered_bulk_op is removed
+................................................
+
+Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`
+and :class:`pymongo.bulk.BulkOperationBuilder`. Use
+:meth:`pymongo.collection.Collection.bulk_write` instead. Code like this::
+
+  batch = coll.initialize_ordered_bulk_op()
+  batch.insert({'a': 1})
+  batch.find({'a': 1}).update_one({'$set': {'b': 1}})
+  batch.find({'a': 2}).upsert().replace_one({'b': 2})
+  batch.find({'a': 3}).remove()
+  result = batch.execute()
+
+Can be changed to this::
+
+  from pymongo import DeleteOne, InsertOne, ReplaceOne, UpdateOne
+
+  coll.bulk_write([
+      InsertOne({'a': 1}),
+      UpdateOne({'a': 1}, {'$set': {'b': 1}}),
+      ReplaceOne({'a': 2}, {'b': 2}, upsert=True),
+      DeleteOne({'a': 3}),
+  ])
+
+Collection.initialize_unordered_bulk_op is removed
+..................................................
+
+Removed :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`.
+Use :meth:`pymongo.collection.Collection.bulk_write` instead. Code like this::
+
+  batch = coll.initialize_unordered_bulk_op()
+  batch.insert({'a': 1})
+  batch.find({'a': 1}).update_one({'$set': {'b': 1}})
+  batch.find({'a': 2}).upsert().replace_one({'b': 2})
+  batch.find({'a': 3}).remove()
+  result = batch.execute()
+
+Can be changed to this::
+
+  coll.bulk_write([
+      InsertOne({'a': 1}),
+      UpdateOne({'a': 1}, {'$set': {'b': 1}}),
+      ReplaceOne({'a': 2}, {'b': 2}, upsert=True),
+      DeleteOne({'a': 3}),
+  ], ordered=False)
+
+Collection.group is removed
+...........................
+
+Removed :meth:`pymongo.collection.Collection.group`. This method was
+deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_.
+Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage
+instead.
+
+.. _group command: https://mongodb.com/docs/manual/reference/command/group/
+
+Collection.map_reduce and Collection.inline_map_reduce are removed
+..................................................................
+
+Removed :meth:`pymongo.collection.Collection.map_reduce` and
+:meth:`pymongo.collection.Collection.inline_map_reduce`.
+Migrate to :meth:`~pymongo.collection.Collection.aggregate` or run the
+`mapReduce command`_ directly with :meth:`~pymongo.database.Database.command`
+instead. For more guidance on this migration see:
+
+- https://mongodb.com/docs/manual/reference/map-reduce-to-aggregation-pipeline/
+- https://mongodb.com/docs/manual/reference/aggregation-commands-comparison/
+
+.. _mapReduce command: https://mongodb.com/docs/manual/reference/command/mapReduce/
+
+Collection.ensure_index is removed
+..................................
+
+Removed :meth:`pymongo.collection.Collection.ensure_index`. Use
+:meth:`~pymongo.collection.Collection.create_index` or
+:meth:`~pymongo.collection.Collection.create_indexes` instead. Note that
+``ensure_index`` maintained an in-memory cache of recently created indexes
+whereas the newer methods do not. Applications should avoid frequent calls
+to :meth:`~pymongo.collection.Collection.create_index` or
+:meth:`~pymongo.collection.Collection.create_indexes`.
Code like this::
+
+  def persist(self, document):
+      collection.ensure_index('a', unique=True)
+      collection.insert_one(document)
+
+Can be changed to this::
+
+  def persist(self, document):
+      # Create the index only once and remember that it exists.
+      if not self.created_index:
+          collection.create_index('a', unique=True)
+          self.created_index = True
+      collection.insert_one(document)
+
+Collection.reindex is removed
+.............................
+
+Removed :meth:`pymongo.collection.Collection.reindex`. Run the
+`reIndex command`_ directly instead. Code like this::
+
+  >>> result = database.my_collection.reindex()
+
+can be changed to this::
+
+  >>> result = database.command('reIndex', 'my_collection')
+
+.. _reIndex command: https://mongodb.com/docs/manual/reference/command/reIndex/
+
+The modifiers parameter is removed
+..................................
+
+Removed the ``modifiers`` parameter from
+:meth:`~pymongo.collection.Collection.find`,
+:meth:`~pymongo.collection.Collection.find_one`,
+:meth:`~pymongo.collection.Collection.find_raw_batches`, and
+:meth:`~pymongo.cursor.Cursor`. Pass the options directly to the method
+instead. Code like this::
+
+  cursor = coll.find({}, modifiers={
+      "$comment": "comment",
+      "$hint": {"_id": 1},
+      "$min": {"_id": 0},
+      "$max": {"_id": 6},
+      "$maxTimeMS": 6000,
+      "$returnKey": False,
+      "$showDiskLoc": False,
+  })
+
+can be changed to this::
+
+  cursor = coll.find(
+      {},
+      comment="comment",
+      hint={"_id": 1},
+      min={"_id": 0},
+      max={"_id": 6},
+      max_time_ms=6000,
+      return_key=False,
+      show_record_id=False,
+  )
+
+The hint parameter is required with min/max
+...........................................
+
+The ``hint`` option is now required when using ``min`` or ``max`` queries
+with :meth:`~pymongo.collection.Collection.find` to ensure the query utilizes
+the correct index. For example, code like this::
+
+  cursor = coll.find({}, min=[('x', min_value)])
+
+can be changed to this::
+
+  cursor = coll.find({}, min=[('x', min_value)], hint=[('x', ASCENDING)])
+
+Collection.__bool__ raises NotImplementedError
+..............................................
+:class:`~pymongo.collection.Collection` now raises an error upon evaluating
+as a Boolean. Code like this::
+
+  if collection:
+
+Can be changed to this::
+
+  if collection is not None:
+
+You must now explicitly compare with None.
+
+Collection.find returns entire document with empty projection
+.............................................................
+Empty projections (e.g. ``{}`` or ``[]``) for
+:meth:`~pymongo.collection.Collection.find` and
+:meth:`~pymongo.collection.Collection.find_one`
+are passed to the server as-is rather than the previous behavior which
+substituted in a projection of ``{"_id": 1}``. This means that an empty
+projection will now return the entire document, not just the ``"_id"`` field.
+To ensure that behavior remains consistent, code like this::
+
+  coll.find({}, projection={})
+
+Can be changed to this::
+
+  coll.find({}, projection={"_id": 1})
+
+SONManipulator is removed
+-------------------------
+
+Removed :mod:`pymongo.son_manipulator`,
+:class:`pymongo.son_manipulator.SONManipulator`,
+:class:`pymongo.son_manipulator.ObjectIdInjector`,
+:class:`pymongo.son_manipulator.ObjectIdShuffler`,
+:class:`pymongo.son_manipulator.AutoReference`,
+:class:`pymongo.son_manipulator.NamespaceInjector`,
+:meth:`pymongo.database.Database.add_son_manipulator`,
+:attr:`pymongo.database.Database.outgoing_copying_manipulators`,
+:attr:`pymongo.database.Database.outgoing_manipulators`,
+:attr:`pymongo.database.Database.incoming_copying_manipulators`, and
+:attr:`pymongo.database.Database.incoming_manipulators`.
+
+Removed the ``manipulate`` parameter from
+:meth:`~pymongo.collection.Collection.find`,
+:meth:`~pymongo.collection.Collection.find_one`, and
+:meth:`~pymongo.cursor.Cursor`.
+
+The :class:`pymongo.son_manipulator.SONManipulator` API has limitations as a
+technique for transforming your data and was deprecated in PyMongo 3.0.
+Instead, it is more flexible and straightforward to transform outgoing
+documents in your own code before passing them to PyMongo, and transform
+incoming documents after receiving them from PyMongo.
+
+Alternatively, if your application uses the ``SONManipulator`` API to convert
+custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and
+:class:`~bson.codec_options.TypeRegistry` APIs may be a suitable alternative.
+For more information, see the
+:doc:`custom type example `.
+
+``SON().items()`` now returns a ``dict_items`` object.
+------------------------------------------------------
+:meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather than
+a list.
+
+``SON().iteritems()`` is removed.
+---------------------------------
+``SON.iteritems()`` is now removed. Code that looks like this::
+
+  for k, v in son.iteritems():
+
+Can now be replaced by code that looks like::
+
+  for k, v in son.items():
+
+IsMaster is removed
+-------------------
+
+Removed :class:`pymongo.ismaster.IsMaster`.
+Use :class:`pymongo.hello.Hello` instead.
+
+NotMasterError is removed
+-------------------------
+
+Removed :exc:`~pymongo.errors.NotMasterError`.
+Use :exc:`~pymongo.errors.NotPrimaryError` instead.
+
+CertificateError is removed
+---------------------------
+
+Removed :exc:`~pymongo.errors.CertificateError`. Since PyMongo 3.0 this error
+is handled internally and is never raised to the application.
+
+pymongo.GEOHAYSTACK is removed
+------------------------------
+
+Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a
+2d index and use $geoNear or $geoWithin instead.
+See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack.
+
+UUIDLegacy is removed
+---------------------
+
+Removed :class:`bson.binary.UUIDLegacy`. Use
+:meth:`bson.binary.Binary.from_uuid` instead. Code like this::
+
+  uu = uuid.uuid4()
+  uuid_legacy = UUIDLegacy(uu)
+
+can be changed to this::
+
+  uu = uuid.uuid4()
+  uuid_legacy = Binary.from_uuid(uu, PYTHON_LEGACY)
+
+Default JSONMode changed from LEGACY to RELAXED
+-----------------------------------------------
+
+Changed the default JSON encoding representation from legacy to relaxed.
+The ``json_mode`` parameter for :const:`bson.json_util.dumps` now defaults to
+:const:`~bson.json_util.RELAXED_JSON_OPTIONS`.
+
+GridFS changes
+--------------
+
+..
_removed-gridfs-checksum:
+
+disable_md5 parameter is removed
+................................
+
+Removed the ``disable_md5`` option for :class:`~gridfs.GridFSBucket` and
+:class:`~gridfs.GridFS`. GridFS no longer generates checksums.
+Applications that desire a file digest should implement it outside GridFS
+and store it with other file metadata. For example::
+
+  import hashlib
+  my_db = MongoClient().test
+  fs = GridFSBucket(my_db)
+  with fs.open_upload_stream("test_file") as grid_in:
+      file_data = b'...'
+      sha256 = hashlib.sha256(file_data).hexdigest()
+      grid_in.write(file_data)
+      grid_in.sha256 = sha256  # Set the custom 'sha256' field
+
+Note that for large files, the checksum may need to be computed in chunks
+to avoid the excessive memory needed to load the entire file at once.
+
+Removed features with no migration path
+---------------------------------------
+
+cursor_manager support is removed
+.................................
+
+Removed :class:`pymongo.cursor_manager.CursorManager`,
+:mod:`pymongo.cursor_manager`, and
+:meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`.
+
+MongoClient.close_cursor is removed
+...................................
+
+Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor` and
+:meth:`pymongo.mongo_client.MongoClient.kill_cursors`. Instead, close cursors
+with :meth:`pymongo.cursor.Cursor.close` or
+:meth:`pymongo.command_cursor.CommandCursor.close`.
+
+.. _killCursors command: https://mongodb.com/docs/manual/reference/command/killCursors/
+
+Database.eval, Database.system_js, and SystemJS are removed
+...........................................................
+
+Removed :meth:`~pymongo.database.Database.eval`,
+:data:`~pymongo.database.Database.system_js` and
+:class:`~pymongo.database.SystemJS`. The eval command was deprecated in
+MongoDB 3.0 and removed in MongoDB 4.2. There is no replacement for eval with
+MongoDB 4.2+.
+
+However, on MongoDB <= 4.0, code like this::
+
+  >>> result = database.eval('function (x) {return x;}', 3)
+
+can be changed to this::
+
+  >>> from bson.code import Code
+  >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval')
+
+Database.error, Database.last_status, Database.previous_error, and Database.reset_error_history are removed
+...........................................................................................................
+
+Removed :meth:`pymongo.database.Database.error`,
+:meth:`pymongo.database.Database.last_status`,
+:meth:`pymongo.database.Database.previous_error`, and
+:meth:`pymongo.database.Database.reset_error_history`.
+These methods are obsolete: all MongoDB write operations use an acknowledged
+write concern and report their errors by default. These methods were
+deprecated in PyMongo 2.8.
+
+Collection.parallel_scan is removed
+...................................
+
+Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2
+removed the `parallelCollectionScan command`_. There is no replacement.
+
+.. _parallelCollectionScan command: https://mongodb.com/docs/manual/reference/command/parallelCollectionScan/
+
+pymongo.message helpers are removed
+...................................
+
+Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`,
+:meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`,
+:meth:`pymongo.message.query`, and :meth:`pymongo.message.update`.
+
+
+Name is a required argument for pymongo.driver_info.DriverInfo
+..............................................................
+
+``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class.
+
+DBRef BSON/JSON decoding behavior
+.................................
+
+Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef`
+to match the behavior outlined in the `DBRef specification`_ version 1.0.
+Specifically, PyMongo now decodes a subdocument into a
+:class:`~bson.dbref.DBRef` if and only if it contains both ``$ref`` and
+``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the
+correct type. Otherwise the document is returned as normal. Previously, any
+subdocument containing a ``$ref`` field would be decoded as a
+:class:`~bson.dbref.DBRef`.
+
+.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst
+
+Encoding a UUID raises an error by default
+..........................................
+
+The default ``uuid_representation`` for :class:`~bson.codec_options.CodecOptions`,
+:class:`~bson.json_util.JSONOptions`, and
+:class:`~pymongo.mongo_client.MongoClient` has been changed from
+:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to
+:data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a
+:class:`uuid.UUID` instance to BSON or JSON now produces an error by default.
+If you were using UUIDs previously, you will need to set your ``uuid_representation`` to
+:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to avoid data corruption. If you do not
+have existing UUID data, you should set it to :data:`bson.binary.UuidRepresentation.STANDARD`.
+If you do not explicitly set a value,
+you will receive an error like this when attempting to encode a :class:`uuid.UUID`::
+
+  ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted...
+
+See :ref:`handling-uuid-data-example` for details.
+
+Additional BSON classes implement ``__slots__``
+...............................................
+
+:class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`,
+:class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`,
+:class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement
+``__slots__`` to reduce memory usage. This means that their attributes are fixed, and new
+attributes cannot be added to the object at runtime.
diff --git a/doc/mongo_extensions.py b/doc/mongo_extensions.py
deleted file mode 100644
index cc6fe40ab2..0000000000
--- a/doc/mongo_extensions.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2009-present MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -"""MongoDB specific extensions to Sphinx.""" - -from docutils import nodes -from docutils.parsers import rst -from sphinx import addnodes - - -class mongodoc(nodes.Admonition, nodes.Element): - pass - - -class mongoref(nodes.reference): - pass - - -def visit_mongodoc_node(self, node): - self.visit_admonition(node, "seealso") - - -def depart_mongodoc_node(self, node): - self.depart_admonition(node) - - -def visit_mongoref_node(self, node): - atts = {"class": "reference external", - "href": node["refuri"], - "name": node["name"]} - self.body.append(self.starttag(node, 'a', '', **atts)) - - -def depart_mongoref_node(self, node): - self.body.append('') - if not isinstance(node.parent, nodes.TextElement): - self.body.append('\n') - - -class MongodocDirective(rst.Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = False - option_spec = {} - - def run(self): - node = mongodoc() - title = 'The MongoDB documentation on' - node += nodes.title(title, title) - self.state.nested_parse(self.content, self.content_offset, node) - return [node] - - -def process_mongodoc_nodes(app, doctree, fromdocname): - for node in doctree.traverse(mongodoc): - anchor = None - for name in node.parent.parent.traverse(addnodes.desc_signature): - anchor = name["ids"][0] - break - if not anchor: - for name in node.parent.traverse(nodes.section): - anchor = name["ids"][0] - break - for para in node.traverse(nodes.paragraph): - tag = str(para.traverse()[1]) - link = mongoref("", "") - link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag - link["name"] = anchor - link.append(nodes.emphasis(tag, tag)) - new_para = nodes.paragraph() - new_para += link - node.replace(para, new_para) - - -def setup(app): - app.add_node(mongodoc, - html=(visit_mongodoc_node, depart_mongodoc_node), - latex=(visit_mongodoc_node, depart_mongodoc_node), - text=(visit_mongodoc_node, depart_mongodoc_node)) - app.add_node(mongoref, - html=(visit_mongoref_node, depart_mongoref_node)) - - app.add_directive("mongodoc", MongodocDirective) - app.connect("doctree-resolved", process_mongodoc_nodes) diff --git a/doc/python3.rst b/doc/python3.rst index 89b0afca57..cc11409bcf 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -1,12 +1,10 @@ Python 3 FAQ ============ -.. contents:: - What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.4+ and PyPy3.5+. +PyMongo supports CPython 3.7+ and PyPy3.8+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +18,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +47,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. 
+
+Additional BSON classes implement ``__slots__``
+...............................................
+
+:class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`,
+:class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`,
+:class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement
+``__slots__`` to reduce memory usage. This means that their attributes are
+fixed, and new attributes cannot be added to the object at runtime.
diff --git a/doc/mongo_extensions.py b/doc/mongo_extensions.py deleted file mode 100644 index cc6fe40ab2..0000000000 --- a/doc/mongo_extensions.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -"""MongoDB specific extensions to Sphinx.""" - -from docutils import nodes -from docutils.parsers import rst -from sphinx import addnodes - - -class mongodoc(nodes.Admonition, nodes.Element): - pass - - -class mongoref(nodes.reference): - pass - - -def visit_mongodoc_node(self, node): - self.visit_admonition(node, "seealso") - - -def depart_mongodoc_node(self, node): - self.depart_admonition(node) - - -def visit_mongoref_node(self, node): - atts = {"class": "reference external", - "href": node["refuri"], - "name": node["name"]} - self.body.append(self.starttag(node, 'a', '', **atts)) - - -def depart_mongoref_node(self, node): - self.body.append('') - if not isinstance(node.parent, nodes.TextElement): - self.body.append('\n') - - -class MongodocDirective(rst.Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = False - option_spec = {} - - def run(self): - node = mongodoc() - title = 'The MongoDB documentation on' - node += nodes.title(title, title) - self.state.nested_parse(self.content, self.content_offset, node) - return [node] - - -def process_mongodoc_nodes(app, doctree, fromdocname): - for node in doctree.traverse(mongodoc): - anchor = None - for name in node.parent.parent.traverse(addnodes.desc_signature): - anchor = name["ids"][0] - break - if not anchor: - for name in node.parent.traverse(nodes.section): - anchor = name["ids"][0] - break - for para in node.traverse(nodes.paragraph): - tag = str(para.traverse()[1]) - link = mongoref("", "") - link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag - link["name"] = anchor - link.append(nodes.emphasis(tag, tag)) - new_para = nodes.paragraph() - new_para += link - node.replace(para, new_para) - - -def setup(app): - app.add_node(mongodoc, - html=(visit_mongodoc_node, depart_mongodoc_node), - latex=(visit_mongodoc_node, depart_mongodoc_node), - text=(visit_mongodoc_node, depart_mongodoc_node)) - app.add_node(mongoref, - html=(visit_mongoref_node, depart_mongoref_node)) - - app.add_directive("mongodoc", MongodocDirective) - app.connect("doctree-resolved", process_mongodoc_nodes) diff --git a/doc/python3.rst b/doc/python3.rst index 89b0afca57..cc11409bcf 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -1,12 +1,10 @@ Python 3 FAQ ============ -.. contents:: - What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.4+ and PyPy3.5+. +PyMongo supports CPython 3.7+ and PyPy3.8+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +18,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +47,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information.
>>> from bson.json_util import loads >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') @@ -86,8 +84,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: >>> pickle.dumps(oid) 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') @@ -97,8 +95,8 @@ If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 you must use ``protocol <= 2``:: - Python 3.6.5 (default, Jun 21 2018, 15:09:09) - [GCC 7.3.0] on linux + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> from bson.objectid import ObjectId diff --git a/doc/tools.rst b/doc/tools.rst index 9633bdc195..6dd0df8a4d 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -26,30 +26,25 @@ needs. Even if you eventually come to the decision to use one of these layers, the time spent working directly with the driver will have increased your understanding of how MongoDB actually works. -PyMODM - `PyMODM `_ is an ORM-like framework on top - of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick - to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it - provides simple, extensible functionality that can be leveraged by other - libraries to target platforms like Django. At the same time, PyMODM is - powerful enough to be used for developing applications on its own. Complete - documentation is available on `readthedocs - `_ in addition to a `Gitter channel - `_ for discussing the project. +MongoEngine + `MongoEngine `_ is another ORM-like + layer on top of PyMongo. It allows you to define schemas for + documents and query collections using syntax inspired by the Django + ORM. The code is available on `GitHub + `_; for more information, see + the `tutorial `_. -Humongolus - `Humongolus `_ is a lightweight ORM - framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at GitHub `_. Tutorials and usage - examples are also available at GitHub. +MincePy + `MincePy `_ is an + object-document mapper (ODM) designed to make any Python object storable + and queryable in a MongoDB database. It is designed with machine learning + and big-data computational and experimental science applications in mind + but is entirely general and can be useful to anyone looking to organise, + share, or process large amounts of data with as little change to their current + workflow as possible. Ming - `Ming `_ (the Merciless) is a + `Ming `_ is a library that allows you to enforce schemas on a MongoDB database in your Python application. It was developed by `SourceForge `_ in the course of their migration to @@ -57,14 +52,6 @@ Ming `_ for more details.
-MongoEngine - `MongoEngine `_ is another ORM-like - layer on top of PyMongo. It allows you to define schemas for - documents and query collections using syntax inspired by the Django - ORM. The code is available on `GitHub - `_; for more information, see - the `tutorial `_. - MotorEngine `MotorEngine `_ is a port of MongoEngine to Motor, for asynchronous access with Tornado. @@ -83,17 +70,21 @@ uMongo No longer maintained """""""""""""""""""" +PyMODM + `PyMODM `_ is an ORM-like framework on top + of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick + to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it + provides simple, extensible functionality that can be leveraged by other + libraries to target platforms like Django. At the same time, PyMODM is + powerful enough to be used for developing applications on its own. Complete + documentation is available on `readthedocs + `_. + MongoKit The `MongoKit `_ framework is an ORM-like layer on top of PyMongo. There is also a MongoKit `google group `_. -MongoAlchemy - `MongoAlchemy `_ is another ORM-like layer on top of - PyMongo. Its API is inspired by `SQLAlchemy `_. The - code is available `on GitHub `_; - for more information, see `the tutorial `_. - Minimongo `minimongo `_ is a lightweight, pythonic interface to MongoDB. It retains pymongo's query and update API, @@ -108,15 +99,26 @@ Manga Django ORM, but Pymongo's query language is maintained. The source `is on GitHub `_. +Humongolus + `Humongolus `_ is a lightweight ORM + framework for Python and MongoDB. The name comes from the combination of + MongoDB and `Homunculus `_ (the + concept of a miniature though fully formed human body). Humongolus allows + you to create models/schemas with robust validation. It attempts to be as + pythonic as possible and exposes the pymongo cursor objects whenever + possible. The code is available for download + `at GitHub `_. Tutorials and usage + examples are also available at GitHub. + Framework Tools --------------- This section lists tools and adapters that have been designed to work with various Python frameworks and libraries. -* `Djongo `_ is a connector for using +* `Djongo `_ is a connector for using Django with MongoDB as the database backend. Use the Django Admin GUI to add and modify documents in MongoDB. - The `Djongo Source Code `_ is hosted on GitHub + The `Djongo Source Code `_ is hosted on GitHub and the `Djongo package `_ is on PyPI. * `Django MongoDB Engine `_ is a MongoDB @@ -129,24 +131,17 @@ various Python frameworks and libraries. `_ is a MongoDB backend for Django, an `example: `_. - For more information ``_ + For more information see ``_ * `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beaker's - `_ caching / session system. + project to enable using MongoDB as a backend for `beaker's `_ caching / session system. `The source is on GitHub `_. * `Log4Mongo `_ is a flexible Python logging handler that can store logs in MongoDB using normal and capped collections. * `MongoLog `_ is a Python logging handler that stores logs in MongoDB using a capped collection. -* `c5t `_ is a content-management system - using TurboGears and MongoDB. * `rod.recipe.mongodb `_ is a ZC Buildout recipe for downloading and installing MongoDB. -* `repoze-what-plugins-mongodb - `_ is a project - working to support a plugin for using MongoDB as a backend for - :mod:`repoze.what`. * `mongobox `_ is a tool to run a sandboxed MongoDB instance from within a python app.
* `Flask-MongoAlchemy `_ Add diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 3e77ab1d8b..e33936363d 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -4,8 +4,9 @@ Tutorial .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('test-database') + client.drop_database("test-database") This tutorial is intended as an introduction to working with **MongoDB** and **PyMongo**. @@ -22,7 +23,7 @@ should run without raising an exception: This tutorial also assumes that a MongoDB instance is running on the default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you +`_ MongoDB, you can start it like so: .. code-block:: bash @@ -45,18 +46,18 @@ specify the host and port explicitly, as follows: .. doctest:: - >>> client = MongoClient('localhost', 27017) + >>> client = MongoClient("localhost", 27017) Or use the MongoDB URI format: .. doctest:: - >>> client = MongoClient('mongodb://localhost:27017/') + >>> client = MongoClient("mongodb://localhost:27017/") Getting a Database ------------------ A single instance of MongoDB can support multiple independent -`databases `_. When +`databases `_. When working with PyMongo you access databases using attribute style access on :class:`~pymongo.mongo_client.MongoClient` instances: @@ -70,11 +71,11 @@ instead: .. doctest:: - >>> db = client['test-database'] + >>> db = client["test-database"] Getting a Collection -------------------- -A `collection `_ is a +A `collection `_ is a group of documents stored in MongoDB, and can be thought of as roughly the equivalent of a table in a relational database. Getting a collection in PyMongo works the same as getting a database: @@ -87,7 +88,7 @@ or (using dictionary style access): .. doctest:: - >>> collection = db['test-collection'] + >>> collection = db["test-collection"] An important note about collections (and databases) in MongoDB is that they are created lazily - none of the above commands have actually @@ -104,15 +105,17 @@ post: .. doctest:: >>> import datetime - >>> post = {"author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow()} + >>> post = { + ... "author": "Mike", + ... "text": "My first blog post!", + ... "tags": ["mongodb", "python", "pymongo"], + ... "date": datetime.datetime.now(tz=datetime.timezone.utc), + ... } Note that documents can contain native Python types (like :class:`datetime.datetime` instances) which will be automatically converted to and from the appropriate `BSON -`_ types. +`_ types. .. todo:: link to table of Python <-> BSON types @@ -134,7 +137,7 @@ of ``"_id"`` must be unique across the collection. :meth:`~pymongo.collection.Collection.insert_one` returns an instance of :class:`~pymongo.results.InsertOneResult`. For more information on ``"_id"``, see the `documentation on _id -`_. +`_. After inserting the first document, the *posts* collection has actually been created on the server. We can verify this by listing all @@ -143,7 +146,7 @@ of the collections in our database: .. 
doctest:: >>> db.list_collection_names() - [u'posts'] + ['posts'] Getting a Single Document With :meth:`~pymongo.collection.Collection.find_one` ------------------------------------------------------------------------------ @@ -159,11 +162,11 @@ document from the posts collection: >>> import pprint >>> pprint.pprint(posts.find_one()) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} The result is a dictionary matching the one that we inserted previously. @@ -177,11 +180,11 @@ our results to a document with author "Mike" we do: .. doctest:: >>> pprint.pprint(posts.find_one({"author": "Mike"})) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} If we try with a different author, like "Eliot", we'll get no result: @@ -201,18 +204,18 @@ We can also find a post by its ``_id``, which in our example is an ObjectId: >>> post_id ObjectId(...) >>> pprint.pprint(posts.find_one({"_id": post_id})) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} Note that an ObjectId is not the same as its string representation: .. doctest:: >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result + >>> posts.find_one({"_id": post_id_as_str}) # No result >>> A common task in web applications is to get an ObjectId from the @@ -229,23 +232,6 @@ case to **convert the ObjectId from a string** before passing it to .. seealso:: :ref:`web-application-querying-by-objectid` -A Note On Unicode Strings -------------------------- -You probably noticed that the regular Python strings we stored earlier look -different when retrieved from the server (e.g. u'Mike' instead of 'Mike'). -A short explanation is in order. - -MongoDB stores data in `BSON format `_. BSON strings are -UTF-8 encoded so PyMongo must ensure that any strings it stores contain only -valid UTF-8 data. Regular strings () are validated and stored -unaltered. Unicode strings () are encoded UTF-8 first. The -reason our example string is represented in the Python shell as u'Mike' instead -of 'Mike' is that PyMongo decodes each BSON string to a Python unicode string, -not a regular str. - -`You can read more about Python unicode strings here -`_. - Bulk Inserts ------------ In order to make querying a little more interesting, let's insert a @@ -257,14 +243,20 @@ command to the server: .. doctest:: - >>> new_posts = [{"author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14)}, - ... {"author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45)}] + >>> new_posts = [ + ... { + ... "author": "Mike", + ... "text": "Another post!", + ... "tags": ["bulk", "insert"], + ... 
"date": datetime.datetime(2009, 11, 12, 11, 14), + ... }, + ... { + ... "author": "Eliot", + ... "title": "MongoDB is fun", + ... "text": "and pretty easy too!", + ... "date": datetime.datetime(2009, 11, 10, 10, 45), + ... }, + ... ] >>> result = posts.insert_many(new_posts) >>> result.inserted_ids [ObjectId('...'), ObjectId('...')] @@ -291,23 +283,23 @@ document in the ``posts`` collection: .. doctest:: >>> for post in posts.find(): - ... pprint.pprint(post) + ... pprint.pprint(post) ... - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} - {u'_id': ObjectId('...'), - u'author': u'Eliot', - u'date': datetime.datetime(...), - u'text': u'and pretty easy too!', - u'title': u'MongoDB is fun'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Eliot', + 'date': datetime.datetime(...), + 'text': 'and pretty easy too!', + 'title': 'MongoDB is fun'} Just like we did with :meth:`~pymongo.collection.Collection.find_one`, we can pass a document to :meth:`~pymongo.collection.Collection.find` @@ -317,18 +309,18 @@ author is "Mike": .. doctest:: >>> for post in posts.find({"author": "Mike"}): - ... pprint.pprint(post) + ... pprint.pprint(post) ... - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} Counting -------- @@ -352,7 +344,7 @@ or just of those documents that match a specific query: Range Queries ------------- MongoDB supports many different types of `advanced queries -`_. As an +`_. As an example, lets perform a query where we limit results to posts older than a certain date, but also sort the results by author: @@ -360,18 +352,18 @@ than a certain date, but also sort the results by author: >>> d = datetime.datetime(2009, 11, 12, 12) >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... pprint.pprint(post) + ... pprint.pprint(post) ... 
- {u'_id': ObjectId('...'), - u'author': u'Eliot', - u'date': datetime.datetime(...), - u'text': u'and pretty easy too!', - u'title': u'MongoDB is fun'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Eliot', + 'date': datetime.datetime(...), + 'text': 'and pretty easy too!', + 'title': 'MongoDB is fun'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} Here we use the special ``"$lt"`` operator to do a range query, and also call :meth:`~pymongo.cursor.Cursor.sort` to sort the results @@ -383,17 +375,16 @@ Indexing Adding indexes can help accelerate certain queries and can also add additional functionality to querying and storing documents. In this example, we'll demonstrate how to create a `unique index -`_ on a key that rejects +`_ on a key that rejects documents whose value for that key already exists in the index. First, we'll need to create the index: .. doctest:: - >>> result = db.profiles.create_index([('user_id', pymongo.ASCENDING)], - ... unique=True) + >>> result = db.profiles.create_index([("user_id", pymongo.ASCENDING)], unique=True) >>> sorted(list(db.profiles.index_information())) - [u'_id_', u'user_id_1'] + ['_id_', 'user_id_1'] Notice that we have two indexes now: one is the index on ``_id`` that MongoDB creates automatically, and the other is the index on ``user_id`` we just @@ -403,9 +394,7 @@ Now let's set up some user profiles: .. doctest:: - >>> user_profiles = [ - ... {'user_id': 211, 'name': 'Luke'}, - ... {'user_id': 212, 'name': 'Ziltoid'}] + >>> user_profiles = [{"user_id": 211, "name": "Luke"}, {"user_id": 212, "name": "Ziltoid"}] >>> result = db.profiles.insert_many(user_profiles) The index prevents us from inserting a document whose ``user_id`` is already in @@ -414,11 +403,11 @@ the collection: .. doctest:: :options: +IGNORE_EXCEPTION_DETAIL - >>> new_profile = {'user_id': 213, 'name': 'Drew'} - >>> duplicate_profile = {'user_id': 212, 'name': 'Tommy'} + >>> new_profile = {"user_id": 213, "name": "Drew"} + >>> duplicate_profile = {"user_id": 212, "name": "Tommy"} >>> result = db.profiles.insert_one(new_profile) # This is fine. >>> result = db.profiles.insert_one(duplicate_profile) Traceback (most recent call last): DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } -.. seealso:: The MongoDB documentation on `indexes `_ +.. seealso:: The MongoDB documentation on `indexes `_ diff --git a/ez_setup.py b/ez_setup.py deleted file mode 100644 index 800c31ef6b..0000000000 --- a/ez_setup.py +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env python - -""" -Setuptools bootstrapping installer. - -Maintained at https://github.com/pypa/setuptools/tree/bootstrap. - -Run this script to install or upgrade setuptools. - -This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details. -""" - -import os -import shutil -import sys -import tempfile -import zipfile -import optparse -import subprocess -import platform -import textwrap -import contextlib - -from distutils import log - -try: - from urllib.request import urlopen -except ImportError: - from urllib2 import urlopen - -try: - from site import USER_SITE -except ImportError: - USER_SITE = None - -# 33.1.1 is the last version that supports setuptools self upgrade/installation. 
-DEFAULT_VERSION = "33.1.1" -DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" -DEFAULT_SAVE_DIR = os.curdir -DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" - -MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' - -log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) - - -def _python_cmd(*args): - """ - Execute a command. - - Return True if the command succeeded. - """ - args = (sys.executable,) + args - return subprocess.call(args) == 0 - - -def _install(archive_filename, install_args=()): - """Install Setuptools.""" - with archive_context(archive_filename): - # installing - log.warn('Installing Setuptools') - if not _python_cmd('setup.py', 'install', *install_args): - log.warn('Something went wrong during the installation.') - log.warn('See the error message above.') - # exitcode will be 2 - return 2 - - -def _build_egg(egg, archive_filename, to_dir): - """Build Setuptools egg.""" - with archive_context(archive_filename): - # building an egg - log.warn('Building a Setuptools egg in %s', to_dir) - _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) - # returning the result - log.warn(egg) - if not os.path.exists(egg): - raise IOError('Could not build the egg.') - - -class ContextualZipFile(zipfile.ZipFile): - - """Supplement ZipFile class to support context manager for Python 2.6.""" - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def __new__(cls, *args, **kwargs): - """Construct a ZipFile or ContextualZipFile as appropriate.""" - if hasattr(zipfile.ZipFile, '__exit__'): - return zipfile.ZipFile(*args, **kwargs) - return super(ContextualZipFile, cls).__new__(cls) - - -@contextlib.contextmanager -def archive_context(filename): - """ - Unzip filename to a temporary directory, set to the cwd. - - The unzipped target is cleaned up after. - """ - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - try: - with ContextualZipFile(filename) as archive: - archive.extractall() - except zipfile.BadZipfile as err: - if not err.args: - err.args = ('', ) - err.args = err.args + ( - MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), - ) - raise - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - yield - - finally: - os.chdir(old_wd) - shutil.rmtree(tmpdir) - - -def _do_download(version, download_base, to_dir, download_delay): - """Download Setuptools.""" - py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) - tp = 'setuptools-{version}-{py_desig}.egg' - egg = os.path.join(to_dir, tp.format(**locals())) - if not os.path.exists(egg): - archive = download_setuptools(version, download_base, - to_dir, download_delay) - _build_egg(egg, archive, to_dir) - sys.path.insert(0, egg) - - # Remove previously-imported pkg_resources if present (see - # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 
- if 'pkg_resources' in sys.modules: - _unload_pkg_resources() - - import setuptools - setuptools.bootstrap_install_from = egg - - -def use_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=DEFAULT_SAVE_DIR, download_delay=15): - """ - Ensure that a setuptools version is installed. - - Return None. Raise SystemExit if the requested version - or later cannot be installed. - """ - to_dir = os.path.abspath(to_dir) - - # prior to importing, capture the module state for - # representative modules. - rep_modules = 'pkg_resources', 'setuptools' - imported = set(sys.modules).intersection(rep_modules) - - try: - import pkg_resources - pkg_resources.require("setuptools>=" + version) - # a suitable version is already installed - return - except ImportError: - # pkg_resources not available; setuptools is not installed; download - pass - except pkg_resources.DistributionNotFound: - # no version of setuptools was found; allow download - pass - except pkg_resources.VersionConflict as VC_err: - if imported: - _conflict_bail(VC_err, version) - - # otherwise, unload pkg_resources to allow the downloaded version to - # take precedence. - del pkg_resources - _unload_pkg_resources() - - return _do_download(version, download_base, to_dir, download_delay) - - -def _conflict_bail(VC_err, version): - """ - Setuptools was imported prior to invocation, so it is - unsafe to unload it. Bail out. - """ - conflict_tmpl = textwrap.dedent(""" - The required version of setuptools (>={version}) is not available, - and can't be installed while this script is running. Please - install a more recent version first, using - 'easy_install -U setuptools'. - - (Currently using {VC_err.args[0]!r}) - """) - msg = conflict_tmpl.format(**locals()) - sys.stderr.write(msg) - sys.exit(2) - - -def _unload_pkg_resources(): - sys.meta_path = [ - importer - for importer in sys.meta_path - if importer.__class__.__module__ != 'pkg_resources.extern' - ] - del_modules = [ - name for name in sys.modules - if name.startswith('pkg_resources') - ] - for mod_name in del_modules: - del sys.modules[mod_name] - - -def _clean_check(cmd, target): - """ - Run the command to download target. - - If the command fails, clean up before re-raising the error. - """ - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - if os.access(target, os.F_OK): - os.unlink(target) - raise - - -def download_file_powershell(url, target): - """ - Download the file at url to target using Powershell. - - Powershell will validate trust. - Raise an exception if the command cannot complete. 
- """ - target = os.path.abspath(target) - ps_cmd = ( - "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " - "[System.Net.CredentialCache]::DefaultCredentials; " - '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' - % locals() - ) - cmd = [ - 'powershell', - '-Command', - ps_cmd, - ] - _clean_check(cmd, target) - - -def has_powershell(): - """Determine if Powershell is available.""" - if platform.system() != 'Windows': - return False - cmd = ['powershell', '-Command', 'echo test'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_powershell.viable = has_powershell - - -def download_file_curl(url, target): - cmd = ['curl', url, '--location', '--silent', '--output', target] - _clean_check(cmd, target) - - -def has_curl(): - cmd = ['curl', '--version'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_curl.viable = has_curl - - -def download_file_wget(url, target): - cmd = ['wget', url, '--quiet', '--output-document', target] - _clean_check(cmd, target) - - -def has_wget(): - cmd = ['wget', '--version'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_wget.viable = has_wget - - -def download_file_insecure(url, target): - """Use Python to download the file, without connection authentication.""" - src = urlopen(url) - try: - # Read all the data in one block. - data = src.read() - finally: - src.close() - - # Write all the data in one block to avoid creating a partial file. - with open(target, "wb") as dst: - dst.write(data) -download_file_insecure.viable = lambda: True - - -def get_best_downloader(): - downloaders = ( - download_file_powershell, - download_file_curl, - download_file_wget, - download_file_insecure, - ) - viable_downloaders = (dl for dl in downloaders if dl.viable()) - return next(viable_downloaders, None) - - -def download_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=DEFAULT_SAVE_DIR, delay=15, - downloader_factory=get_best_downloader): - """ - Download setuptools from a specified location and return its filename. - - `version` should be a valid setuptools version number that is available - as an sdist for download under the `download_base` URL (which should end - with a '/'). `to_dir` is the directory where the egg will be downloaded. - `delay` is the number of seconds to pause before an actual download - attempt. - - ``downloader_factory`` should be a function taking no arguments and - returning a function for downloading a URL to a target. - """ - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - zip_name = "setuptools-%s.zip" % version - url = download_base + zip_name - saveto = os.path.join(to_dir, zip_name) - if not os.path.exists(saveto): # Avoid repeated downloads - log.warn("Downloading %s", url) - downloader = downloader_factory() - downloader(url, saveto) - return os.path.realpath(saveto) - - -def _build_install_args(options): - """ - Build the arguments to 'python setup.py install' on the setuptools package. - - Returns list of command line arguments. 
- """ - return ['--user'] if options.user_install else [] - - -def _parse_args(): - """Parse the command line for options.""" - parser = optparse.OptionParser() - parser.add_option( - '--user', dest='user_install', action='store_true', default=False, - help='install in user site package') - parser.add_option( - '--download-base', dest='download_base', metavar="URL", - default=DEFAULT_URL, - help='alternative URL from where to download the setuptools package') - parser.add_option( - '--insecure', dest='downloader_factory', action='store_const', - const=lambda: download_file_insecure, default=get_best_downloader, - help='Use internal, non-validating downloader' - ) - parser.add_option( - '--version', help="Specify which version to download", - default=DEFAULT_VERSION, - ) - parser.add_option( - '--to-dir', - help="Directory to save (and re-use) package", - default=DEFAULT_SAVE_DIR, - ) - options, args = parser.parse_args() - # positional arguments are ignored - return options - - -def _download_args(options): - """Return args for download_setuptools function from cmdline args.""" - return dict( - version=options.version, - download_base=options.download_base, - downloader_factory=options.downloader_factory, - to_dir=options.to_dir, - ) - - -def main(): - """Install or upgrade setuptools and EasyInstall.""" - options = _parse_args() - archive = download_setuptools(**_download_args(options)) - return _install(archive, _build_install_args(options)) - -if __name__ == '__main__': - sys.exit(main()) diff --git a/green_framework_test.py b/green_framework_test.py index baffe21b15..01f72b245a 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -13,38 +13,46 @@ # limitations under the License. """Test PyMongo with a variety of greenlet-based monkey-patching frameworks.""" +from __future__ import annotations import getopt import sys +import pytest + def run_gevent(): """Prepare to run tests with Gevent. Can raise ImportError.""" from gevent import monkey + monkey.patch_all() def run_eventlet(): """Prepare to run tests with Eventlet. Can raise ImportError.""" import eventlet + # https://github.com/eventlet/eventlet/issues/401 eventlet.sleep() eventlet.monkey_patch() FRAMEWORKS = { - 'gevent': run_gevent, - 'eventlet': run_eventlet, + "gevent": run_gevent, + "eventlet": run_eventlet, } def list_frameworks(): """Tell the user what framework names are valid.""" - sys.stdout.write("""Testable frameworks: %s + sys.stdout.write( + """Testable frameworks: %s Note that membership in this list means the framework can be tested with PyMongo, not necessarily that it is officially supported. -""" % ", ".join(sorted(FRAMEWORKS))) +""" + % ", ".join(sorted(FRAMEWORKS)) + ) def run(framework_name, *args): @@ -53,20 +61,18 @@ def run(framework_name, *args): FRAMEWORKS[framework_name]() # Run the tests. - sys.argv[:] = ['setup.py', 'test'] + list(args) - import setup + sys.exit(pytest.main(list(args))) def main(): """Parse options and run tests.""" - usage = """python %s FRAMEWORK_NAME + usage = f"""python {sys.argv[0]} FRAMEWORK_NAME Test PyMongo with a variety of greenlet-based monkey-patching frameworks. 
See -python %s --help-frameworks.""" % (sys.argv[0], sys.argv[0]) +python {sys.argv[0]} --help-frameworks.""" try: - opts, args = getopt.getopt( - sys.argv[1:], "h", ["help", "help-frameworks"]) + opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "help-frameworks"]) except getopt.GetoptError as err: print(str(err)) print(usage) @@ -80,20 +86,21 @@ def main(): list_frameworks() sys.exit() else: - assert False, "unhandled option" + raise AssertionError("unhandled option") if not args: print(usage) sys.exit(1) if args[0] not in FRAMEWORKS: - print('%r is not a testable framework.\n' % args[0]) + print("%r is not a testable framework.\n" % args[0]) list_frameworks() sys.exit(1) - run(args[0], # Framework name. - *args[1:]) # Command line args to setup.py, like what test to run. + run( + args[0], *args[1:] # Framework name. + ) # Command line args to pytest, like what test to run. -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 6c56a605e6..63aa40623a 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -17,27 +17,47 @@ The :mod:`gridfs` package is an implementation of GridFS on top of :mod:`pymongo`, exposing a file-like interface. -.. mongodoc:: gridfs +.. seealso:: The MongoDB documentation on `gridfs `_. """ +from __future__ import annotations -from bson.py3compat import abc +from collections import abc +from typing import Any, Mapping, Optional, cast + +from bson.objectid import ObjectId from gridfs.errors import NoFile -from gridfs.grid_file import (GridIn, - GridOut, - GridOutCursor, - DEFAULT_CHUNK_SIZE, - _clear_entity_type_registry) -from pymongo import (ASCENDING, - DESCENDING) -from pymongo.common import UNAUTHORIZED_CODES, validate_string +from gridfs.grid_file import ( + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, + _clear_entity_type_registry, + _disallow_transactions, +) +from pymongo import ASCENDING, DESCENDING, _csot +from pymongo.client_session import ClientSession +from pymongo.collection import Collection +from pymongo.common import validate_string from pymongo.database import Database -from pymongo.errors import ConfigurationError, OperationFailure - +from pymongo.errors import ConfigurationError +from pymongo.read_preferences import _ServerMode +from pymongo.write_concern import WriteConcern + +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] + + +class GridFS: + """An instance of GridFS on top of a single Database.""" -class GridFS(object): - """An instance of GridFS on top of a single Database. - """ - def __init__(self, database, collection="fs", disable_md5=False): + def __init__(self, database: Database, collection: str = "fs"): """Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of @@ -46,9 +66,17 @@ def __init__(self, database, collection="fs", disable_md5=False): :Parameters: - `database`: database to use - `collection` (optional): root collection to use - - `disable_md5` (optional): When True, MD5 checksums will not be - computed for uploaded files. Useful in environments where MD5 - cannot be used for regulatory or other reasons. Defaults to False. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. 
versionchanged:: 3.7 + Added the `disable_md5` parameter. .. versionchanged:: 3.1 Indexes are only ensured on the first write to the DB. @@ -57,7 +85,7 @@ def __init__(self, database, collection="fs", disable_md5=False): `database` must use an acknowledged :attr:`~pymongo.database.Database.write_concern` - .. mongodoc:: gridfs + .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(database, Database): raise TypeError("database must be an instance of Database") @@ -65,16 +93,13 @@ def __init__(self, database, collection="fs", disable_md5=False): database = _clear_entity_type_registry(database) if not database.write_concern.acknowledged: - raise ConfigurationError('database must use ' - 'acknowledged write_concern') + raise ConfigurationError("database must use acknowledged write_concern") - self.__database = database self.__collection = database[collection] self.__files = self.__collection.files self.__chunks = self.__collection.chunks - self.__disable_md5 = disable_md5 - def new_file(self, **kwargs): + def new_file(self, **kwargs: Any) -> GridIn: """Create a new file in GridFS. Returns a new :class:`~gridfs.grid_file.GridIn` instance to @@ -88,28 +113,21 @@ def new_file(self, **kwargs): :Parameters: - `**kwargs` (optional): keyword arguments for file creation """ - # No need for __ensure_index_files_id() here; GridIn ensures - # the (files_id, n) index when needed. - return GridIn( - self.__collection, disable_md5=self.__disable_md5, **kwargs) + return GridIn(self.__collection, **kwargs) - def put(self, data, **kwargs): + def put(self, data: Any, **kwargs: Any) -> Any: """Put data in GridFS as a new file. Equivalent to doing:: - try: - f = new_file(**kwargs) + with fs.new_file(**kwargs) as f: f.write(data) - finally: - f.close() - - `data` can be either an instance of :class:`str` (:class:`bytes` - in python 3) or a file-like object providing a :meth:`read` method. - If an `encoding` keyword argument is passed, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which will - be encoded as `encoding` before being written. Any keyword arguments - will be passed through to the created file - see + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file. @@ -124,16 +142,11 @@ def put(self, data, **kwargs): .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ - grid_file = GridIn( - self.__collection, disable_md5=self.__disable_md5, **kwargs) - try: + with GridIn(self.__collection, **kwargs) as grid_file: grid_file.write(data) - finally: - grid_file.close() - - return grid_file._id + return grid_file._id - def get(self, file_id, session=None): + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Get a file from GridFS by ``"_id"``. 
Returns an instance of :class:`~gridfs.grid_file.GridOut`, @@ -153,7 +166,13 @@ def get(self, file_id, session=None): gout._ensure_file() return gout - def get_version(self, filename=None, version=-1, session=None, **kwargs): + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: """Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches @@ -192,7 +211,10 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): if filename is not None: query["filename"] = filename + _disallow_transactions(session) cursor = self.__files.find(query, session=session) + if version is None: + version = -1 if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) @@ -200,12 +222,13 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) try: doc = next(cursor) - return GridOut( - self.__collection, file_document=doc, session=session) + return GridOut(self.__collection, file_document=doc, session=session) except StopIteration: - raise NoFile("no version %d for filename %r" % (version, filename)) + raise NoFile("no version %d for filename %r" % (version, filename)) from None - def get_last_version(self, filename=None, session=None, **kwargs): + def get_last_version( + self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any + ) -> GridOut: """Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. @@ -224,7 +247,7 @@ def get_last_version(self, filename=None, session=None, **kwargs): return self.get_version(filename=filename, session=session, **kwargs) # TODO add optional safe mode for chunk removal? - def delete(self, file_id, session=None): + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: @@ -249,10 +272,11 @@ def delete(self, file_id, session=None): .. versionchanged:: 3.1 ``delete`` no longer ensures indexes. """ + _disallow_transactions(session) self.__files.delete_one({"_id": file_id}, session=session) self.__chunks.delete_many({"files_id": file_id}, session=session) - def list(self, session=None): + def list(self, session: Optional[ClientSession] = None) -> list[str]: """List the names of all files stored in this instance of :class:`GridFS`. @@ -266,13 +290,20 @@ def list(self, session=None): .. versionchanged:: 3.1 ``list`` no longer ensures indexes. """ + _disallow_transactions(session) # With an index, distinct includes documents with no filename # as None. return [ - name for name in self.__files.distinct("filename", session=session) - if name is not None] - - def find_one(self, filter=None, session=None, *args, **kwargs): + name for name in self.__files.distinct("filename", session=session) if name is not None + ] + + def find_one( + self, + filter: Optional[Any] = None, + session: Optional[ClientSession] = None, + *args: Any, + **kwargs: Any, + ) -> Optional[GridOut]: """Get a single file from gridfs. 
All arguments to :meth:`find` are also valid arguments for @@ -299,12 +330,13 @@ def find_one(self, filter=None, session=None, *args, **kwargs): if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} + _disallow_transactions(session) for f in self.find(filter, *args, session=session, **kwargs): return f return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Query GridFS for files. Returns a cursor that iterates across files matching @@ -336,9 +368,9 @@ def find(self, *args, **kwargs): are associated with that session. :Parameters: - - `filter` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which files + to include in the result set. Can be an empty document to include + all files. - `skip` (optional): the number of files to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to @@ -361,11 +393,16 @@ def find(self, *args, **kwargs): Removed the read_preference, tag_sets, and secondary_acceptable_latency_ms options. .. versionadded:: 2.7 - .. mongodoc:: find + .. seealso:: The MongoDB documentation on `find `_. """ return GridOutCursor(self.__collection, *args, **kwargs) - def exists(self, document_or_id=None, session=None, **kwargs): + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> bool: """Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its @@ -403,6 +440,7 @@ def exists(self, document_or_id=None, session=None, **kwargs): .. versionchanged:: 3.6 Added ``session`` parameter. """ + _disallow_transactions(session) if kwargs: f = self.__files.find_one(kwargs, ["_id"], session=session) else: @@ -411,12 +449,17 @@ def exists(self, document_or_id=None, session=None, **kwargs): return f is not None -class GridFSBucket(object): +class GridFSBucket: """An instance of GridFS on top of a single Database.""" - def __init__(self, db, bucket_name="fs", - chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, - read_preference=None, disable_md5=False): + def __init__( + self, + db: Database, + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: """Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of @@ -435,13 +478,21 @@ def __init__(self, db, bucket_name="fs", (the default) db.write_concern is used. - `read_preference` (optional): The read preference to use. If ``None`` (the default) db.read_preference is used. - - `disable_md5` (optional): When True, MD5 checksums will not be - computed for uploaded files. Useful in environments where MD5 - cannot be used for regulatory or other reasons. Defaults to False. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. .. versionadded:: 3.1 - .. mongodoc:: gridfs + .. seealso:: The MongoDB documentation on `gridfs `_. 
""" if not isinstance(db, Database): raise TypeError("database must be an instance of Database") @@ -450,25 +501,28 @@ def __init__(self, db, bucket_name="fs", wtc = write_concern if write_concern is not None else db.write_concern if not wtc.acknowledged: - raise ConfigurationError('write concern must be acknowledged') + raise ConfigurationError("write concern must be acknowledged") - self._db = db self._bucket_name = bucket_name self._collection = db[bucket_name] - self._disable_md5 = disable_md5 - - self._chunks = self._collection.chunks.with_options( - write_concern=write_concern, - read_preference=read_preference) + self._chunks: Collection = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference + ) - self._files = self._collection.files.with_options( - write_concern=write_concern, - read_preference=read_preference) + self._files: Collection = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference + ) self._chunk_size_bytes = chunk_size_bytes - - def open_upload_stream(self, filename, chunk_size_bytes=None, - metadata=None, session=None): + self._timeout = db.client.options.timeout + + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -479,11 +533,11 @@ def open_upload_stream(self, filename, chunk_size_bytes=None, my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream( + with fs.open_upload_stream( "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. @@ -506,21 +560,25 @@ def open_upload_stream(self, filename, chunk_size_bytes=None, """ validate_string("filename", filename) - opts = {"filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata - return GridIn( - self._collection, - session=session, - disable_md5=self._disable_md5, - **opts) + return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( - self, file_id, filename, chunk_size_bytes=None, metadata=None, - session=None): + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -531,13 +589,13 @@ def open_upload_stream_with_id( my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream_with_id( + with fs.open_upload_stream_with_id( ObjectId(), "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. 
@@ -562,21 +620,27 @@ """ validate_string("filename", filename) - opts = {"_id": file_id, - "filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata - return GridIn( - self._collection, - session=session, - disable_md5=self._disable_md5, - **opts) - - def upload_from_stream(self, filename, source, chunk_size_bytes=None, - metadata=None, session=None): + return GridIn(self._collection, session=session, **opts) + + @_csot.apply + def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> ObjectId: """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads @@ -612,15 +676,21 @@ def upload_from_stream(self, filename, source, chunk_size_bytes=None, .. versionchanged:: 3.6 Added ``session`` parameter. """ - with self.open_upload_stream( - filename, chunk_size_bytes, metadata, session=session) as gin: + with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) - return gin._id - - def upload_from_stream_with_id(self, file_id, filename, source, - chunk_size_bytes=None, metadata=None, - session=None): + return cast(ObjectId, gin._id) + + @_csot.apply + def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> None: """Uploads a user file to a GridFS bucket with a custom file id. Reads the contents of the user file from `source` and uploads @@ -658,11 +728,13 @@ def upload_from_stream_with_id(self, file_id, filename, source, Added ``session`` parameter. """ with self.open_upload_stream_with_id( - file_id, filename, chunk_size_bytes, metadata, - session=session) as gin: + file_id, filename, chunk_size_bytes, metadata, session=session + ) as gin: gin.write(source) - def open_download_stream(self, file_id, session=None): + def open_download_stream( + self, file_id: Any, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of the stored file specified by file_id. @@ -693,7 +765,10 @@ def open_download_stream(self, file_id, session=None): gout._ensure_file() return gout - def download_to_stream(self, file_id, destination, session=None): + @_csot.apply + def download_to_stream( + self, file_id: Any, destination: Any, session: Optional[ClientSession] = None + ) -> None: """Downloads the contents of the stored file specified by file_id and writes the contents to `destination`. @@ -721,10 +796,14 @@ def download_to_stream(self, file_id, destination, session=None): Added ``session`` parameter. """ with self.open_download_stream(file_id, session=session) as gout: - for chunk in gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) - def delete(self, file_id, session=None): + @_csot.apply + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Given a file_id, delete this stored file's files collection document and associated chunks from a GridFS bucket. 
@@ -746,13 +825,13 @@ def delete(self, file_id, session=None): .. versionchanged:: 3.6 Added ``session`` parameter. """ + _disallow_transactions(session) res = self._files.delete_one({"_id": file_id}, session=session) self._chunks.delete_many({"files_id": file_id}, session=session) if not res.deleted_count: - raise NoFile( - "no file could be deleted because none matched %s" % file_id) + raise NoFile("no file could be deleted because none matched %s" % file_id) - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` Returns a cursor that iterates across files matching @@ -800,7 +879,9 @@ def find(self, *args, **kwargs): """ return GridOutCursor(self._collection, *args, **kwargs) - def open_download_stream_by_name(self, filename, revision=-1, session=None): + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. @@ -839,9 +920,8 @@ def open_download_stream_by_name(self, filename, revision=-1, session=None): Added ``session`` parameter. """ validate_string("filename", filename) - query = {"filename": filename} - + _disallow_transactions(session) cursor = self._files.find(query, session=session) if revision < 0: skip = abs(revision) - 1 @@ -850,14 +930,18 @@ def open_download_stream_by_name(self, filename, revision=-1, session=None): cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) try: grid_file = next(cursor) - return GridOut( - self._collection, file_document=grid_file, session=session) + return GridOut(self._collection, file_document=grid_file, session=session) except StopIteration: - raise NoFile( - "no version %d for filename %r" % (revision, filename)) - - def download_to_stream_by_name(self, filename, destination, revision=-1, - session=None): + raise NoFile("no version %d for filename %r" % (revision, filename)) from None + + @_csot.apply + def download_to_stream_by_name( + self, + filename: str, + destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None, + ) -> None: """Write the contents of `filename` (with optional `revision`) to `destination`. @@ -895,12 +979,16 @@ def download_to_stream_by_name(self, filename, destination, revision=-1, .. versionchanged:: 3.6 Added ``session`` parameter. """ - with self.open_download_stream_by_name( - filename, revision, session=session) as gout: - for chunk in gout: + with self.open_download_stream_by_name(filename, revision, session=session) as gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) - def rename(self, file_id, new_filename, session=None): + def rename( + self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None + ) -> None: """Renames the stored file with the specified file_id. For example:: @@ -922,9 +1010,12 @@ def rename(self, file_id, new_filename, session=None): .. versionchanged:: 3.6 Added ``session`` parameter. 
""" - result = self._files.update_one({"_id": file_id}, - {"$set": {"filename": new_filename}}, - session=session) + _disallow_transactions(session) + result = self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) if not result.matched_count: - raise NoFile("no files could be renamed %r because none " - "matched file_id %i" % (new_filename, file_id)) + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) diff --git a/gridfs/errors.py b/gridfs/errors.py index 39736d55b3..e8c02cef4f 100644 --- a/gridfs/errors.py +++ b/gridfs/errors.py @@ -13,6 +13,7 @@ # limitations under the License. """Exceptions raised by the :mod:`gridfs` package""" +from __future__ import annotations from pymongo.errors import PyMongoError diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 88b71ebb1f..685d097494 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -13,36 +13,35 @@ # limitations under the License. """Tools for representing files stored in GridFS.""" +from __future__ import annotations + import datetime -import hashlib import io import math import os +from typing import Any, Iterable, Mapping, NoReturn, Optional -from bson.int64 import Int64 -from bson.son import SON from bson.binary import Binary +from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.py3compat import text_type, StringIO +from bson.son import SON from gridfs.errors import CorruptGridFile, FileExists, NoFile from pymongo import ASCENDING +from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.cursor import Cursor -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DuplicateKeyError, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) from pymongo.read_preferences import ReadPreference -try: - _SEEK_SET = os.SEEK_SET - _SEEK_CUR = os.SEEK_CUR - _SEEK_END = os.SEEK_END -# before 2.5 -except AttributeError: - _SEEK_SET = 0 - _SEEK_CUR = 1 - _SEEK_END = 2 +_SEEK_SET = os.SEEK_SET +_SEEK_CUR = os.SEEK_CUR +_SEEK_END = os.SEEK_END EMPTY = b"" NEWLN = b"\n" @@ -51,47 +50,54 @@ # Slightly under a power of 2, to work well with server's record allocations. 
DEFAULT_CHUNK_SIZE = 255 * 1024 -_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)]) -_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) +_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)]) +_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) -def _grid_in_property(field_name, docstring, read_only=False, - closed_only=False): +def _grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: """Create a GridIn property.""" - def getter(self): + + def getter(self: Any) -> Any: if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % - field_name) + raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return self._file.get(field_name, 0) return self._file.get(field_name, None) - def setter(self, value): + def setter(self: Any, value: Any) -> Any: if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {field_name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) self._file[field_name] = value if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: - docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.") + docstring = "{}\n\n{}".format( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) if not read_only and not closed_only: return property(getter, setter, doc=docstring) return property(getter, doc=docstring) -def _grid_out_property(field_name, docstring): +def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" - def getter(self): + + def getter(self: Any) -> Any: self._ensure_file() # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return self._file.get(field_name, 0) return self._file.get(field_name, None) @@ -99,17 +105,23 @@ def getter(self): return property(getter, doc=docstring) -def _clear_entity_type_registry(entity, **kwargs): +def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: """Clear the given database/collection object's type registry.""" codecopts = entity.codec_options.with_options(type_registry=None) return entity.with_options(codec_options=codecopts, **kwargs) -class GridIn(object): - """Class to write data to GridFS. - """ +def _disallow_transactions(session: Optional[ClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class GridIn: + """Class to write data to GridFS.""" + def __init__( - self, root_collection, session=None, disable_md5=False, **kwargs): + self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any + ) -> None: """Write a file to GridFS Application developers should generally not need to @@ -137,22 +149,22 @@ def __init__( - ``"chunkSize"`` or ``"chunk_size"``: size of each of the chunks, in bytes (default: 255 kb) - - ``"encoding"``: encoding used for this file. In Python 2, - any :class:`unicode` that is written to the file will be - converted to a :class:`str`. In Python 3, any :class:`str` - that is written to the file will be converted to - :class:`bytes`. 
+ - ``"encoding"``: encoding used for this file. Any :class:`str` + that is written to the file will be converted to :class:`bytes`. :Parameters: - `root_collection`: root collection to write to - `session` (optional): a :class:`~pymongo.client_session.ClientSession` to use for all commands - - `disable_md5` (optional): When True, an MD5 checksum will not be - computed for the uploaded file. Useful in environments where - MD5 cannot be used for regulatory or other reasons. Defaults to - False. - - `**kwargs` (optional): file level options (see above) + - `**kwargs: Any` (optional): file level options (see above) + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -162,12 +174,11 @@ def __init__( :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") if not root_collection.write_concern.acknowledged: - raise ConfigurationError('root_collection must use ' - 'acknowledged write_concern') + raise ConfigurationError("root_collection must use acknowledged write_concern") + _disallow_transactions(session) # Handle alternative naming if "content_type" in kwargs: @@ -175,11 +186,8 @@ def __init__( if "chunk_size" in kwargs: kwargs["chunkSize"] = kwargs.pop("chunk_size") - coll = _clear_entity_type_registry( - root_collection, read_preference=ReadPreference.PRIMARY) + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) - if not disable_md5: - kwargs["md5"] = hashlib.md5() # Defaults kwargs["_id"] = kwargs.get("_id", ObjectId()) kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) @@ -187,67 +195,69 @@ def __init__( object.__setattr__(self, "_coll", coll) object.__setattr__(self, "_chunks", coll.chunks) object.__setattr__(self, "_file", kwargs) - object.__setattr__(self, "_buffer", StringIO()) + object.__setattr__(self, "_buffer", io.BytesIO()) object.__setattr__(self, "_position", 0) object.__setattr__(self, "_chunk_number", 0) object.__setattr__(self, "_closed", False) object.__setattr__(self, "_ensured_index", False) - def __create_index(self, collection, index_key, unique): + def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: - index_keys = [index_spec['key'] for index_spec in - collection.list_indexes(session=self._session)] + index_keys = [ + index_spec["key"] + for index_spec in collection.list_indexes(session=self._session) + ] except OperationFailure: index_keys = [] if index_key not in index_keys: - collection.create_index( - index_key.items(), unique=unique, session=self._session) + collection.create_index(index_key.items(), unique=unique, session=self._session) - def __ensure_indexes(self): + def __ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) self.__create_index(self._coll.files, _F_INDEX, False) self.__create_index(self._coll.chunks, _C_INDEX, True) object.__setattr__(self, "_ensured_index", True) - def abort(self): - """Remove all chunks/files that may have been uploaded and close. 
- """ - self._coll.chunks.delete_many( - {"files_id": self._file['_id']}, session=self._session) - self._coll.files.delete_one( - {"_id": self._file['_id']}, session=self._session) + def abort(self) -> None: + """Remove all chunks/files that may have been uploaded and close.""" + self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) object.__setattr__(self, "_closed", True) @property - def closed(self): - """Is this file closed? - """ + def closed(self) -> bool: + """Is this file closed?""" return self._closed - _id = _grid_in_property("_id", "The ``'_id'`` value for this file.", - read_only=True) - filename = _grid_in_property("filename", "Name of this file.") - name = _grid_in_property("filename", "Alias for `filename`.") - content_type = _grid_in_property("contentType", "Mime-type for this file.") - length = _grid_in_property("length", "Length (in bytes) of this file.", - closed_only=True) - chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.", - read_only=True) - upload_date = _grid_in_property("uploadDate", - "Date that this file was uploaded.", - closed_only=True) - md5 = _grid_in_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.", - closed_only=True) - - def __getattr__(self, name): + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) + filename: Optional[str] = _grid_in_property("filename", "Name of this file.") + name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_in_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _grid_in_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. MD5 of the contents of this file if an md5 sum was created.", + closed_only=True, + ) + + _buffer: io.BytesIO + _closed: bool + + def __getattr__(self, name: str) -> Any: if name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: @@ -258,60 +268,47 @@ def __setattr__(self, name, value): # them now. self._file[name] = value if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - def __flush_data(self, data): - """Flush `data` to a chunk. 
- """ + def __flush_data(self, data: Any) -> None: + """Flush `data` to a chunk.""" self.__ensure_indexes() - if 'md5' in self._file: - self._file['md5'].update(data) - if not data: return - assert(len(data) <= self.chunk_size) + assert len(data) <= self.chunk_size - chunk = {"files_id": self._file["_id"], - "n": self._chunk_number, - "data": Binary(data)} + chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} try: self._chunks.insert_one(chunk, session=self._session) except DuplicateKeyError: - self._raise_file_exists(self._file['_id']) + self._raise_file_exists(self._file["_id"]) self._chunk_number += 1 self._position += len(data) - def __flush_buffer(self): - """Flush the buffer contents out to a chunk. - """ + def __flush_buffer(self) -> None: + """Flush the buffer contents out to a chunk.""" self.__flush_data(self._buffer.getvalue()) self._buffer.close() - self._buffer = StringIO() + self._buffer = io.BytesIO() - def __flush(self): - """Flush the file to the database. - """ + def __flush(self) -> Any: + """Flush the file to the database.""" try: self.__flush_buffer() - - if "md5" in self._file: - self._file["md5"] = self._file["md5"].hexdigest() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) - self._file["uploadDate"] = datetime.datetime.utcnow() + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) - return self._coll.files.insert_one( - self._file, session=self._session) + return self._coll.files.insert_one(self._file, session=self._session) except DuplicateKeyError: self._raise_file_exists(self._id) - def _raise_file_exists(self, file_id): + def _raise_file_exists(self, file_id: Any) -> NoReturn: """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) - def close(self): + def close(self) -> None: """Flush the file and close it. A closed file cannot be written any more. Calling @@ -321,30 +318,29 @@ def close(self): self.__flush() object.__setattr__(self, "_closed", True) - def read(self, size=-1): - raise io.UnsupportedOperation('read') + def read(self, size: int = -1) -> NoReturn: + raise io.UnsupportedOperation("read") - def readable(self): + def readable(self) -> bool: return False - def seekable(self): + def seekable(self) -> bool: return False - def write(self, data): + def write(self, data: Any) -> None: """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which - will be encoded as :attr:`encoding` before being written. + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of - :class:`str` (:class:`bytes` in python 3), a file-like object, - or an instance of :class:`unicode` (:class:`str` in python 3). + :class:`bytes`, a file-like object, or an instance of :class:`str`. Unicode data is only allowed if the file has an :attr:`encoding` attribute. 
@@ -360,15 +356,16 @@ def write(self, data): read = data.read except AttributeError: # string - if not isinstance(data, (text_type, bytes)): - raise TypeError("can only write strings or file-like objects") - if isinstance(data, text_type): + if not isinstance(data, (str, bytes)): + raise TypeError("can only write strings or file-like objects") from None + if isinstance(data, str): try: data = data.encode(self.encoding) except AttributeError: - raise TypeError("must specify an encoding for file in " - "order to write %s" % (text_type.__name__,)) - read = StringIO(data).read + raise TypeError( + "must specify an encoding for file in order to write str" + ) from None + read = io.BytesIO(data).read if self._buffer.tell() > 0: # Make sure to flush only when _buffer is complete @@ -376,7 +373,7 @@ def write(self, data): if space: try: to_write = read(space) - except: + except BaseException: self.abort() raise self._buffer.write(to_write) @@ -389,38 +386,47 @@ def write(self, data): to_write = read(self.chunk_size) self._buffer.write(to_write) - def writelines(self, sequence): + def writelines(self, sequence: Iterable[Any]) -> None: """Write a sequence of strings to the file. - Does not add seperators. + Does not add separators. """ for line in sequence: self.write(line) - def writeable(self): + def writeable(self) -> bool: return True - def __enter__(self): - """Support for the context manager protocol. - """ + def __enter__(self) -> GridIn: + """Support for the context manager protocol.""" return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Support for the context manager protocol. - Close the file and allow exceptions to propagate. + Close the file if no exceptions occur and allow exceptions to propagate. """ - self.close() + if exc_type is None: + # No exceptions happened. + self.close() + else: + # Something happened, at minimum mark as closed. + object.__setattr__(self, "_closed", True) # propagate exceptions return False -class GridOut(object): - """Class to read data out of GridFS. - """ - def __init__(self, root_collection, file_id=None, file_document=None, - session=None): +class GridOut(io.IOBase): + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: Collection, + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None, + ) -> None: """Read a file from GridFS Application developers should generally not need to @@ -454,93 +460,99 @@ def __init__(self, root_collection, file_id=None, file_document=None, from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") + _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) + super().__init__() + self.__chunks = root_collection.chunks self.__files = root_collection.files self.__file_id = file_id self.__buffer = EMPTY + # Start position within the current buffered chunk. + self.__buffer_pos = 0 self.__chunk_iter = None + # Position within the total file. 
self.__position = 0 self._file = file_document self._session = session - _id = _grid_out_property("_id", "The ``'_id'`` value for this file.") - filename = _grid_out_property("filename", "Name of this file.") - name = _grid_out_property("filename", "Alias for `filename`.") - content_type = _grid_out_property("contentType", "Mime-type for this file.") - length = _grid_out_property("length", "Length (in bytes) of this file.") - chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date = _grid_out_property("uploadDate", - "Date that this file was first uploaded.") - aliases = _grid_out_property("aliases", "List of aliases for this file.") - metadata = _grid_out_property("metadata", "Metadata attached to this file.") - md5 = _grid_out_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.") - - def _ensure_file(self): + _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _grid_out_property("filename", "Name of this file.") + name: str = _grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_out_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) + aliases: Optional[list[str]] = _grid_out_property( + "aliases", "DEPRECATED, will be removed in PyMongo 5.0. List of aliases for this file." + ) + metadata: Optional[Mapping[str, Any]] = _grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _grid_out_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. MD5 of the contents of this file if an md5 sum was created.", + ) + + _file: Any + __chunk_iter: Any + + def _ensure_file(self) -> None: if not self._file: - self._file = self.__files.find_one({"_id": self.__file_id}, - session=self._session) + _disallow_transactions(self._session) + self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: - raise NoFile("no file in gridfs collection %r with _id %r" % - (self.__files, self.__file_id)) + raise NoFile( + f"no file in gridfs collection {self.__files!r} with _id {self.__file_id!r}" + ) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: self._ensure_file() if name in self._file: return self._file[name] raise AttributeError("GridOut object has no attribute '%s'" % name) - def readable(self): + def readable(self) -> bool: return True - def readchunk(self): + def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. 
""" - received = len(self.__buffer) + received = len(self.__buffer) - self.__buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) if received > 0: - chunk_data = self.__buffer + chunk_data = self.__buffer[self.__buffer_pos :] elif self.__position < int(self.length): chunk_number = int((received + self.__position) / chunk_size) if self.__chunk_iter is None: self.__chunk_iter = _GridOutChunkIterator( - self, self.__chunks, self._session, chunk_number) + self, self.__chunks, self._session, chunk_number + ) chunk = self.__chunk_iter.next() - chunk_data = chunk["data"][self.__position % chunk_size:] + chunk_data = chunk["data"][self.__position % chunk_size :] if not chunk_data: raise CorruptGridFile("truncated chunk") self.__position += len(chunk_data) self.__buffer = EMPTY + self.__buffer_pos = 0 return chunk_data - def read(self, size=-1): - """Read at most `size` bytes from the file (less if there - isn't enough data). - - The bytes are returned as an instance of :class:`str` (:class:`bytes` - in python 3). If `size` is negative or omitted all data is read. - - :Parameters: - - `size` (optional): the number of bytes to read - - .. versionchanged:: 3.8 - This method now only checks for extra chunks after reading the - entire file. Previously, this method would check for extra chunks - on every call. - """ + def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" self._ensure_file() - remainder = int(self.length) - self.__position if size < 0 or size > remainder: size = remainder @@ -549,11 +561,36 @@ def read(self, size=-1): return EMPTY received = 0 - data = StringIO() + data = [] while received < size: - chunk_data = self.readchunk() + needed = size - received + if self.__buffer: + # Optimization: Read the buffer with zero byte copies. + buf = self.__buffer + chunk_start = self.__buffer_pos + chunk_data = memoryview(buf)[self.__buffer_pos :] + self.__buffer = EMPTY + self.__buffer_pos = 0 + self.__position += len(chunk_data) + else: + buf = self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. + size = received + pos + 1 + needed = pos + 1 + if len(chunk_data) > needed: + data.append(chunk_data[:needed]) + # Optimization: Save the buffer with zero byte copies. + self.__buffer = buf + self.__buffer_pos = chunk_start + needed + self.__position -= len(self.__buffer) - self.__buffer_pos + else: + data.append(chunk_data) received += len(chunk_data) - data.write(chunk_data) # Detect extra chunks after reading the entire file. if size == remainder and self.__chunk_iter: @@ -562,54 +599,38 @@ def read(self, size=-1): except StopIteration: pass - self.__position -= received - size + return b"".join(data) - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + def read(self, size: int = -1) -> bytes: + """Read at most `size` bytes from the file (less if there + isn't enough data). - def readline(self, size=-1): - """Read one line or up to `size` bytes from the file. + The bytes are returned as an instance of :class:`bytes` + If `size` is negative or omitted all data is read. 
:Parameters: - - `size` (optional): the maximum number of bytes to read - """ - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - if size == 0: - return EMPTY - - received = 0 - data = StringIO() - while received < size: - chunk_data = self.readchunk() - pos = chunk_data.find(NEWLN, 0, size) - if pos != -1: - size = received + pos + 1 - - received += len(chunk_data) - data.write(chunk_data) - if pos != -1: - break + - `size` (optional): the number of bytes to read - self.__position -= received - size + .. versionchanged:: 3.8 + This method now only checks for extra chunks after reading the + entire file. Previously, this method would check for extra chunks + on every call. + """ + return self._read_size_or_line(size=size) - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + def readline(self, size: int = -1) -> bytes: # type: ignore[override] + """Read one line or up to `size` bytes from the file. - def tell(self): - """Return the current position of this file. + :Parameters: + - `size` (optional): the maximum number of bytes to read """ + return self._read_size_or_line(size=size, line=True) + + def tell(self) -> int: + """Return the current position of this file.""" return self.__position - def seek(self, pos, whence=_SEEK_SET): + def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. :Parameters: @@ -620,6 +641,10 @@ def seek(self, pos, whence=_SEEK_SET): positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end. + + .. versionchanged:: 4.1 + The method now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. """ if whence == _SEEK_SET: new_pos = pos @@ -628,75 +653,112 @@ def seek(self, pos, whence=_SEEK_SET): elif whence == _SEEK_END: new_pos = int(self.length) + pos else: - raise IOError(22, "Invalid value for `whence`") + raise OSError(22, "Invalid value for `whence`") if new_pos < 0: - raise IOError(22, "Invalid value for `pos` - must be positive") + raise OSError(22, "Invalid value for `pos` - must be positive") # Optimization, continue using the same buffer and chunk iterator. if new_pos == self.__position: - return + return new_pos self.__position = new_pos self.__buffer = EMPTY + self.__buffer_pos = 0 if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None + return new_pos - def seekable(self): + def seekable(self) -> bool: return True - def __iter__(self): + def __iter__(self) -> GridOut: """Return an iterator over all of this file's data. - The iterator will return chunk-sized instances of - :class:`str` (:class:`bytes` in python 3). This can be - useful when serving files using a webserver that handles - such an iterator efficiently. - - .. note:: - This is different from :py:class:`io.IOBase` which iterates over - *lines* in the file. Use :meth:`GridOut.readline` to read line by - line instead of chunk by chunk. + The iterator will return lines (delimited by ``b'\\n'``) of + :class:`bytes`. This can be useful when serving files + using a webserver that handles such an iterator efficiently. .. versionchanged:: 3.8 The iterator now raises :class:`CorruptGridFile` when encountering any truncated, missing, or extra chunk in a file. The previous behavior was to only raise :class:`CorruptGridFile` on a missing chunk. + + .. 
versionchanged:: 4.0 + The iterator now iterates over *lines* in the file, instead + of chunks, to conform to the base class :py:class:`io.IOBase`. + Use :meth:`GridOut.readchunk` to read chunk by chunk instead + of line by line. """ - return GridOutIterator(self, self.__chunks, self._session) + return self - def close(self): + def close(self) -> None: """Make GridOut more generically file-like.""" if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None + super().close() + + def write(self, value: Any) -> NoReturn: + raise io.UnsupportedOperation("write") - def write(self, value): - raise io.UnsupportedOperation('write') + def writelines(self, lines: Any) -> NoReturn: + raise io.UnsupportedOperation("writelines") - def __enter__(self): + def writable(self) -> bool: + return False + + def __enter__(self) -> GridOut: """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ self.close() return False + def fileno(self) -> NoReturn: + raise io.UnsupportedOperation("fileno") + + def flush(self) -> None: + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self) -> bool: + return False + + def truncate(self, size: Optional[int] = None) -> NoReturn: + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation("truncate") -class _GridOutChunkIterator(object): + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self) -> None: + pass + + +class _GridOutChunkIterator: """Iterates over a file's chunks using a single cursor. Raises CorruptGridFile when encountering any truncated, missing, or extra chunk in a file. """ - def __init__(self, grid_out, chunks, session, next_chunk): + + def __init__( + self, + grid_out: GridOut, + chunks: Collection, + session: Optional[ClientSession], + next_chunk: Any, + ) -> None: self._id = grid_out._id self._chunk_size = int(grid_out.chunk_size) self._length = int(grid_out.length) @@ -706,22 +768,24 @@ def __init__(self, grid_out, chunks, session, next_chunk): self._num_chunks = math.ceil(float(self._length) / self._chunk_size) self._cursor = None - def expected_chunk_length(self, chunk_n): + _cursor: Optional[Cursor] + + def expected_chunk_length(self, chunk_n: int) -> int: if chunk_n < self._num_chunks - 1: return self._chunk_size return self._length - (self._chunk_size * (self._num_chunks - 1)) - def __iter__(self): + def __iter__(self) -> _GridOutChunkIterator: return self - def _create_cursor(self): + def _create_cursor(self) -> None: filter = {"files_id": self._id} if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} - self._cursor = self._chunks.find(filter, sort=[("n", 1)], - session=self._session) + _disallow_transactions(self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) - def _next_with_retry(self): + def _next_with_retry(self) -> Mapping[str, Any]: """Return the next chunk and retry once on CursorNotFound. 
We retry on CursorNotFound to maintain backwards compatibility in @@ -730,7 +794,7 @@ def _next_with_retry(self): """ if self._cursor is None: self._create_cursor() - + assert self._cursor is not None try: return self._cursor.next() except CursorNotFound: @@ -738,19 +802,20 @@ def _next_with_retry(self): self._create_cursor() return self._cursor.next() - def next(self): + def next(self) -> Mapping[str, Any]: try: chunk = self._next_with_retry() except StopIteration: if self._next_chunk >= self._num_chunks: raise - raise CorruptGridFile("no chunk #%d" % self._next_chunk) + raise CorruptGridFile("no chunk #%d" % self._next_chunk) from None if chunk["n"] != self._next_chunk: self.close() raise CorruptGridFile( "Missing chunk: expected chunk #%d but found " - "chunk with n=%d" % (self._next_chunk, chunk["n"])) + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) if chunk["n"] >= self._num_chunks: # According to spec, ignore extra chunks if they are empty. @@ -758,35 +823,36 @@ def next(self): self.close() raise CorruptGridFile( "Extra chunk found: expected %d chunks but found " - "chunk with n=%d" % (self._num_chunks, chunk["n"])) + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) expected_length = self.expected_chunk_length(chunk["n"]) if len(chunk["data"]) != expected_length: self.close() raise CorruptGridFile( "truncated chunk #%d: expected chunk length to be %d but " - "found chunk with length %d" % ( - chunk["n"], expected_length, len(chunk["data"]))) + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) self._next_chunk += 1 return chunk __next__ = next - def close(self): + def close(self) -> None: if self._cursor: self._cursor.close() self._cursor = None -class GridOutIterator(object): - def __init__(self, grid_out, chunks, session): +class GridOutIterator: + def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) - def __iter__(self): + def __iter__(self) -> GridOutIterator: return self - def next(self): + def next(self) -> bytes: chunk = self.__chunk_iter.next() return bytes(chunk["data"]) @@ -797,9 +863,18 @@ class GridOutCursor(Cursor): """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ - def __init__(self, collection, filter=None, skip=0, limit=0, - no_cursor_timeout=False, sort=None, batch_size=0, - session=None): + + def __init__( + self, + collection: Collection, + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None, + ) -> None: """Create a new cursor, similar to the normal :class:`~pymongo.cursor.Cursor`. @@ -808,35 +883,39 @@ def __init__(self, collection, filter=None, skip=0, limit=0, .. versionadded:: 2.7 - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_. """ + _disallow_transactions(session) collection = _clear_entity_type_registry(collection) # Hold on to the base "fs" collection to create GridOut objects later. self.__root_collection = collection - super(GridOutCursor, self).__init__( - collection.files, filter, skip=skip, limit=limit, - no_cursor_timeout=no_cursor_timeout, sort=sort, - batch_size=batch_size, session=session) - - def next(self): - """Get next GridOut object from cursor.
- """ - # Work around "super is not iterable" issue in Python 3.x - next_file = super(GridOutCursor, self).next() - return GridOut(self.__root_collection, file_document=next_file, - session=self.session) + super().__init__( + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) + + def next(self) -> GridOut: + """Get next GridOut object from cursor.""" + _disallow_transactions(self.session) + next_file = super().next() + return GridOut(self.__root_collection, file_document=next_file, session=self.session) __next__ = next - def add_option(self, *args, **kwargs): + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args, **kwargs): + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") - def _clone_base(self, session): - """Creates an empty GridOutCursor for information to be copied into. - """ + def _clone_base(self, session: Optional[ClientSession]) -> GridOutCursor: + """Creates an empty GridOutCursor for information to be copied into.""" return GridOutCursor(self.__root_collection, session=session) diff --git a/gridfs/py.typed b/gridfs/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/gridfs/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000000..5fd52aa7c1 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,40 @@ +[mypy] +python_version = 3.7 +check_untyped_defs = true +disallow_subclassing_any = true +disallow_incomplete_defs = true +no_implicit_optional = true +pretty = true +show_error_context = true +show_error_codes = true +strict_equality = true +warn_unused_configs = true +warn_unused_ignores = true +warn_redundant_casts = true + +[mypy-gevent.*] +ignore_missing_imports = True + +[mypy-kerberos.*] +ignore_missing_imports = True + +[mypy-mockupdb] +ignore_missing_imports = True + +[mypy-pymongo_auth_aws.*] +ignore_missing_imports = True + +[mypy-pymongocrypt.*] +ignore_missing_imports = True + +[mypy-service_identity.*] +ignore_missing_imports = True + +[mypy-snappy.*] +ignore_missing_imports = True + +[mypy-test.test_typing] +warn_unused_ignores = True + +[mypy-winkerberos.*] +ignore_missing_imports = True diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 92ae333175..cdcbe5a5a0 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -13,6 +13,38 @@ # limitations under the License. """Python driver for MongoDB.""" +from __future__ import annotations + +from typing import ContextManager, Optional + +__all__ = [ + "ASCENDING", + "DESCENDING", + "GEO2D", + "GEOSPHERE", + "HASHED", + "TEXT", + "version_tuple", + "get_version_string", + "__version__", + "version", + "ReturnDocument", + "MAX_SUPPORTED_WIRE_VERSION", + "MIN_SUPPORTED_WIRE_VERSION", + "CursorType", + "MongoClient", + "DeleteMany", + "DeleteOne", + "IndexModel", + "InsertOne", + "ReplaceOne", + "UpdateMany", + "UpdateOne", + "ReadPreference", + "WriteConcern", + "has_c", + "timeout", +] ASCENDING = 1 """Ascending sort order.""" @@ -22,15 +54,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. 
_geospatial index: http://docs.mongodb.org/manual/core/2d/ -""" - -GEOHAYSTACK = "geoHaystack" -"""Index specifier for a 2-dimensional `haystack index`_. - -.. versionadded:: 2.1 - -.. _haystack index: http://docs.mongodb.org/manual/core/geohaystack/ +.. _geospatial index: http://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -38,7 +62,7 @@ .. versionadded:: 2.5 -.. _spherical geospatial index: http://docs.mongodb.org/manual/core/2dsphere/ +.. _spherical geospatial index: http://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -46,54 +70,109 @@ .. versionadded:: 2.5 -.. _hashed index: http://docs.mongodb.org/manual/core/index-hashed/ +.. _hashed index: http://mongodb.com/docs/manual/core/index-hashed/ """ TEXT = "text" """Index specifier for a `text index`_. +.. seealso:: MongoDB's `Atlas Search + `_ which offers more advanced + text search functionality. + .. versionadded:: 2.7.1 -.. _text index: http://docs.mongodb.org/manual/core/index-text/ +.. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -OFF = 0 -"""No database profiling.""" -SLOW_ONLY = 1 -"""Only profile slow operations.""" -ALL = 2 -"""Profile all operations.""" - -version_tuple = (3, 10, 1) - -def get_version_string(): - if isinstance(version_tuple[-1], str): - return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] - return '.'.join(map(str, version_tuple)) - -__version__ = version = get_version_string() -"""Current version of PyMongo.""" - +from pymongo import _csot +from pymongo._version import __version__, get_version_string, version_tuple from pymongo.collection import ReturnDocument -from pymongo.common import (MIN_SUPPORTED_WIRE_VERSION, - MAX_SUPPORTED_WIRE_VERSION) +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.operations import (IndexModel, - InsertOne, - DeleteOne, - DeleteMany, - UpdateOne, - UpdateMany, - ReplaceOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -def has_c(): +version = __version__ +"""Current version of PyMongo.""" + + +def has_c() -> bool: """Is the C extension installed?""" try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + return True except ImportError: return False + + +def timeout(seconds: Optional[float]) -> ContextManager[None]: + """**(Provisional)** Apply the given timeout for a block of operations. + + .. note:: :func:`~pymongo.timeout` is currently provisional. Backwards + incompatible changes may occur before becoming officially supported. + + Use :func:`~pymongo.timeout` in a with-statement:: + + with pymongo.timeout(5): + client.db.coll.insert_one({}) + client.db.coll2.insert_one({}) + + When the with-statement is entered, a deadline is set for the entire + block. When that deadline is exceeded, any blocking pymongo operation + will raise a timeout exception. For example:: + + try: + with pymongo.timeout(5): + client.db.coll.insert_one({}) + time.sleep(5) + # The deadline has now expired, the next operation will raise + # a timeout exception. 
+ client.db.coll2.insert_one({}) + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") + + When nesting :func:`~pymongo.timeout`, the nested deadline is capped by + the outer deadline. The deadline can only be shortened, not extended. + When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Still uses the original 5 second deadline. + coll.find_one() # Uses the original 5 second deadline. + + :Parameters: + - `seconds`: A non-negative floating point number expressing seconds, or None. + + :Raises: + - :py:class:`ValueError`: When `seconds` is negative. + + See :ref:`timeout-example` for more examples. + + .. versionadded:: 4.2 + """ + if not isinstance(seconds, (int, float, type(None))): + raise TypeError("timeout must be None, an int, or a float") + if seconds and seconds < 0: + raise ValueError("timeout cannot be negative") + if seconds is not None: + seconds = float(seconds) + return _csot._TimeoutContext(seconds) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 7c4a517c5c..7ac66a1e4b 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -28,15 +28,14 @@ struct module_state { PyObject* _cbson; + PyObject* _max_bson_size_str; + PyObject* _max_message_size_str; + PyObject* _max_write_batch_size_str; + PyObject* _max_split_size_str; }; /* See comments about module initialization in _cbsonmodule.c */ -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif #define DOC_TOO_LARGE_FMT "BSON document too large (%d bytes)" \ " - the connected server supports" \ @@ -67,366 +66,11 @@ static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssiz return buffer_write_bytes(buffer, data, downsize); } -/* add a lastError message on the end of the buffer. - * returns 0 on failure */ -static int add_last_error(PyObject* self, buffer_t buffer, - int request_id, char* ns, Py_ssize_t nslen, - codec_options_t* options, PyObject* args) { - struct module_state *state = GETSTATE(self); - - int message_start; - int document_start; - int message_length; - int document_length; - PyObject* key = NULL; - PyObject* value = NULL; - Py_ssize_t pos = 0; - PyObject* one; - char *p = strchr(ns, '.'); - /* Length of the database portion of ns. */ - nslen = p ? 
(int)(p - ns) : nslen; - - message_start = buffer_save_space(buffer, 4); - if (message_start == -1) { - PyErr_NoMemory(); - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00" /* opcode */ - "\x00\x00\x00\x00", /* options */ - 12) || - !buffer_write_bytes_ssize_t(buffer, ns, nslen) || /* database */ - !buffer_write_bytes(buffer, - ".$cmd\x00" /* collection name */ - "\x00\x00\x00\x00" /* skip */ - "\xFF\xFF\xFF\xFF", /* limit (-1) */ - 14)) { - return 0; - } - - /* save space for length */ - document_start = buffer_save_space(buffer, 4); - if (document_start == -1) { - PyErr_NoMemory(); - return 0; - } - - /* getlasterror: 1 */ - if (!(one = PyLong_FromLong(1))) - return 0; - - if (!write_pair(state->_cbson, buffer, "getlasterror", 12, one, 0, - options, 1)) { - Py_DECREF(one); - return 0; - } - Py_DECREF(one); - - /* getlasterror options */ - while (PyDict_Next(args, &pos, &key, &value)) { - if (!decode_and_write_pair(state->_cbson, buffer, key, value, 0, - options, 0)) { - return 0; - } - } - - /* EOD */ - if (!buffer_write_bytes(buffer, "\x00", 1)) { - return 0; - } - - message_length = buffer_get_position(buffer) - message_start; - document_length = buffer_get_position(buffer) - document_start; - buffer_write_int32_at_position( - buffer, message_start, (int32_t)message_length); - buffer_write_int32_at_position( - buffer, document_start, (int32_t)document_length); - return 1; -} - -static int init_insert_buffer(buffer_t buffer, int request_id, int options, - const char* coll_name, Py_ssize_t coll_name_len, - int compress) { - int length_location = 0; - if (!compress) { - /* Save space for message length */ - int length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyErr_NoMemory(); - return length_location; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd2\x07\x00\x00", - 8)) { - return -1; - } - } - if (!buffer_write_int32(buffer, (int32_t)options) || - !buffer_write_bytes_ssize_t(buffer, - coll_name, - coll_name_len + 1)) { - return -1; - } - return length_location; -} - -static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { - /* Used by the Bulk API to insert into pre-2.6 servers. Collection.insert - * uses _cbson_do_batched_insert. 
*/ - struct module_state *state = GETSTATE(self); - - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - Py_ssize_t collection_name_length; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - int before, cur_size, max_size = 0; - int flags = 0; - unsigned char check_keys; - unsigned char safe; - unsigned char continue_on_error; - codec_options_t options; - PyObject* last_error_args; - buffer_t buffer; - int length_location, message_length; - PyObject* result; - - if (!PyArg_ParseTuple(args, "et#ObbObO&", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, - &continue_on_error, - convert_codec_options, &options)) { - return NULL; - } - if (continue_on_error) { - flags += 1; - } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - length_location = init_insert_buffer(buffer, - request_id, - flags, - collection_name, - collection_name_length, - 0); - if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - buffer_free(buffer); - return NULL; - } - - iterator = PyObject_GetIter(docs); - if (iterator == NULL) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "input is not iterable"); - Py_DECREF(InvalidOperation); - } - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - while ((doc = PyIter_Next(iterator)) != NULL) { - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - Py_DECREF(doc); - Py_DECREF(iterator); - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - Py_DECREF(doc); - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? 
cur_size : max_size; - } - Py_DECREF(iterator); - - if (PyErr_Occurred()) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - if (!max_size) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, &options, last_error_args)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - } - - PyMem_Free(collection_name); - - /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - max_size); - destroy_codec_options(&options); - buffer_free(buffer); - return result; -} - -static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - - int request_id = rand(); - char* collection_name = NULL; - Py_ssize_t collection_name_length; - int before, cur_size, max_size = 0; - PyObject* doc; - PyObject* spec; - unsigned char multi; - unsigned char upsert; - unsigned char safe; - unsigned char check_keys; - codec_options_t options; - PyObject* last_error_args; - int flags; - buffer_t buffer; - int length_location, message_length; - PyObject* result; - - if (!PyArg_ParseTuple(args, "et#bbOObObO&", - "utf-8", - &collection_name, - &collection_name_length, - &upsert, &multi, &spec, &doc, &safe, - &last_error_args, &check_keys, - convert_codec_options, &options)) { - return NULL; - } - - flags = 0; - if (upsert) { - flags += 1; - } - if (multi) { - flags += 2; - } - buffer = buffer_new(); - if (!buffer) { - destroy_codec_options(&options); - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd1\x07\x00\x00" - "\x00\x00\x00\x00", - 12) || - !buffer_write_bytes_ssize_t(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_int32(buffer, (int32_t)flags)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, spec, 0, &options, 1)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - max_size = buffer_get_position(buffer) - before; - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? 
cur_size : max_size; - - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, &options, last_error_args)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - } - - PyMem_Free(collection_name); - - /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - max_size); - destroy_codec_options(&options); - buffer_free(buffer); - return result; -} - static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ struct module_state *state = GETSTATE(self); int request_id = rand(); - PyObject* cluster_time = NULL; unsigned int flags; char* collection_name = NULL; Py_ssize_t collection_name_length; @@ -435,63 +79,34 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { int num_to_return; PyObject* query; PyObject* field_selector; + PyObject* options_obj; codec_options_t options; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; - unsigned char check_keys = 0; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "Iet#iiOOO&|b", + if (!(PyArg_ParseTuple(args, "Iet#iiOOO", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options, - &check_keys)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; + goto fail; } - /* Pop $clusterTime from dict and write it at the end, avoiding an error - * from the $-prefix and check_keys. - * - * If "dict" is a defaultdict we don't want to call PyMapping_GetItemString - * on it. That would **create** an _id where one didn't previously exist - * (PYTHON-871). - */ - if (PyDict_Check(query)) { - cluster_time = PyDict_GetItemString(query, "$clusterTime"); - if (cluster_time) { - /* PyDict_GetItemString returns a borrowed reference. 
*/ - Py_INCREF(cluster_time); - if (-1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } - } else if (PyMapping_HasKeyString(query, "$clusterTime")) { - cluster_time = PyMapping_GetItemString(query, "$clusterTime"); - if (!cluster_time - || -1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || !buffer_write_int32(buffer, (int32_t)flags) || @@ -502,65 +117,38 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { goto fail; } - begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, query, check_keys, &options, 1)) { + begin = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { goto fail; } - /* back up a byte and write $clusterTime */ - if (cluster_time) { - int length; - char zero = 0; - - buffer_update_position(buffer, buffer_get_position(buffer) - 1); - if (!write_pair(state->_cbson, buffer, "$clusterTime", 12, cluster_time, - 0, &options, 1)) { - goto fail; - } - - if (!buffer_write_bytes(buffer, &zero, 1)) { - goto fail; - } - - length = buffer_get_position(buffer) - begin; - buffer_write_int32_at_position(buffer, begin, (int32_t)length); - - /* undo popping $clusterTime */ - if (-1 == PyMapping_SetItemString( - query, "$clusterTime", cluster_time)) { - goto fail; - } - - Py_CLEAR(cluster_time); - } - - max_size = buffer_get_position(buffer) - begin; + max_size = pymongo_buffer_get_position(buffer) - begin; if (field_selector != Py_None) { - begin = buffer_get_position(buffer); + begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, field_selector, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - begin; + cur_size = pymongo_buffer_get_position(buffer) - begin; max_size = (cur_size > max_size) ? 
cur_size : max_size; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + result = Py_BuildValue("iy#i", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), max_size); - fail: PyMem_Free(collection_name); destroy_codec_options(&options); - buffer_free(buffer); - Py_XDECREF(cluster_time); + if (buffer) { + pymongo_buffer_free(buffer); + } return result; } @@ -571,9 +159,9 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { Py_ssize_t collection_name_length; int num_to_return; long long cursor_id; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; if (!PyArg_ParseTuple(args, "et#iL", "utf-8", @@ -583,19 +171,15 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { &cursor_id)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, @@ -607,22 +191,22 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)num_to_return) || !buffer_write_int64(buffer, (int64_t)cursor_id)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } - PyMem_Free(collection_name); - - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); - buffer_free(buffer); + result = Py_BuildValue("iy#", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); +fail: + PyMem_Free(collection_name); + if (buffer) { + pymongo_buffer_free(buffer); + } return result; } @@ -642,79 +226,76 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; - unsigned char check_keys = 0; + PyObject* options_obj; codec_options_t options; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; int total_size = 0; int max_doc_size = 0; PyObject* result = NULL; PyObject* iterator = NULL; - /*flags, command, identifier, docs, check_keys, opts*/ - if (!PyArg_ParseTuple(args, "IOet#ObO&", + /*flags, command, identifier, docs, opts*/ + if (!(PyArg_ParseTuple(args, "IOet#OO", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - &check_keys, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - buffer = buffer_new(); + buffer = 
pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - goto bufferfail; + goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); - goto bufferfail; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00" /* responseTo */ "\xdd\x07\x00\x00" /* 2013 */, 8)) { - goto encodefail; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)flags) || !buffer_write_bytes(buffer, "\x00", 1) /* Payload type 0 */) { - goto encodefail; + goto fail; } total_size = write_dict(state->_cbson, buffer, command, 0, &options, 1); if (!total_size) { - goto encodefail; + goto fail; } if (identifier_length) { int payload_one_length_location, payload_length; /* Payload type 1 */ if (!buffer_write_bytes(buffer, "\x01", 1)) { - goto encodefail; + goto fail; } /* save space for payload 0 length */ - payload_one_length_location = buffer_save_space(buffer, 4); + payload_one_length_location = pymongo_buffer_save_space(buffer, 4); /* C string identifier */ if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { - goto encodefail; + goto fail; } iterator = PyObject_GetIter(docs); if (iterator == NULL) { - goto encodefail; + goto fail; } while ((doc = PyIter_Next(iterator)) != NULL) { int encoded_doc_size = write_dict( - state->_cbson, buffer, doc, check_keys, - &options, 1); + state->_cbson, buffer, doc, 0, &options, 1); if (!encoded_doc_size) { Py_CLEAR(doc); - goto encodefail; + goto fail; } if (encoded_doc_size > max_doc_size) { max_doc_size = encoded_doc_size; @@ -722,26 +303,27 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_CLEAR(doc); } - payload_length = buffer_get_position(buffer) - payload_one_length_location; + payload_length = pymongo_buffer_get_position(buffer) - payload_one_length_location; buffer_write_int32_at_position( buffer, payload_one_length_location, (int32_t)payload_length); total_size += payload_length; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "ii", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + result = Py_BuildValue("iy#ii", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), total_size, max_doc_size); -encodefail: +fail: Py_XDECREF(iterator); - buffer_free(buffer); -bufferfail: + if (buffer) { + pymongo_buffer_free(buffer); + } PyMem_Free(identifier); destroy_codec_options(&options); return result; @@ -752,11 +334,7 @@ static void _set_document_too_large(int size, long max) { PyObject* DocumentTooLarge = _error("DocumentTooLarge"); if (DocumentTooLarge) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#else - PyObject* error = PyString_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#endif if (error) { PyErr_SetObject(DocumentTooLarge, error); Py_DECREF(error); @@ -765,343 +343,6 @@ _set_document_too_large(int size, long max) { } } -static PyObject* -_send_insert(PyObject* self, PyObject* ctx, - PyObject* gle_args, buffer_t buffer, - char* coll_name, Py_ssize_t coll_len, int request_id, int safe, - codec_options_t* options, PyObject* to_publish, int compress) { 
- - if (safe) { - if (!add_last_error(self, buffer, request_id, - coll_name, coll_len, options, gle_args)) { - return NULL; - } - } - - /* The max_doc_size parameter for legacy_bulk_insert is the max size of - * any document in buffer. We enforced max size already, pass 0 here. */ - return PyObject_CallMethod(ctx, "legacy_bulk_insert", - "i" BYTES_FORMAT_STRING "iNOi", - request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - 0, - PyBool_FromLong((long)safe), - to_publish, compress); -} - -static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); - - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - int send_safe, flags = 0; - int length_location, message_length; - Py_ssize_t collection_name_length; - int compress; - char* collection_name = NULL; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - PyObject* ctx; - PyObject* last_error_args; - PyObject* result; - PyObject* max_bson_size_obj; - PyObject* max_message_size_obj; - PyObject* compress_obj; - PyObject* to_publish = NULL; - unsigned char check_keys; - unsigned char safe; - unsigned char continue_on_error; - codec_options_t options; - unsigned char empty = 1; - long max_bson_size; - long max_message_size; - buffer_t buffer; - PyObject *exc_type = NULL, *exc_value = NULL, *exc_trace = NULL; - - if (!PyArg_ParseTuple(args, "et#ObbObO&O", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, - &continue_on_error, - convert_codec_options, &options, - &ctx)) { - return NULL; - } - if (continue_on_error) { - flags += 1; - } - /* - * If we are doing unacknowledged writes *and* continue_on_error - * is True it's pointless (and slower) to send GLE. 
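Restating the acknowledgement rule encoded just after this comment: getLastError is skipped only when the write is unacknowledged and continue_on_error is set; in every other case an error reply is needed, if only to know when to stop. A hedged sketch with a hypothetical helper name:

    def needs_gle(safe: bool, continue_on_error: bool) -> bool:
        # Mirrors the C expression: send_safe = (safe || !continue_on_error)
        return safe or not continue_on_error

    assert needs_gle(False, True) is False  # w=0 and skipping errors: no GLE
    assert needs_gle(False, False) is True  # w=0 but must stop on first error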
- */ - send_safe = (safe || !continue_on_error); - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 - max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif - Py_XDECREF(max_bson_size_obj); - if (max_bson_size == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); -#if PY_MAJOR_VERSION >= 3 - max_message_size = PyLong_AsLong(max_message_size_obj); -#else - max_message_size = PyInt_AsLong(max_message_size_obj); -#endif - Py_XDECREF(max_message_size_obj); - if (max_message_size == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - compress_obj = PyObject_GetAttrString(ctx, "compress"); - compress = PyObject_IsTrue(compress_obj); - Py_XDECREF(compress_obj); - if (compress == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - compress = compress && !(safe || send_safe); - - buffer = buffer_new(); - if (!buffer) { - destroy_codec_options(&options); - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } - - length_location = init_insert_buffer(buffer, - request_id, - flags, - collection_name, - collection_name_length, - compress); - if (length_location == -1) { - goto insertfail; - } - - if (!(to_publish = PyList_New(0))) { - goto insertfail; - } - - iterator = PyObject_GetIter(docs); - if (iterator == NULL) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "input is not iterable"); - Py_DECREF(InvalidOperation); - } - goto insertfail; - } - while ((doc = PyIter_Next(iterator)) != NULL) { - int before = buffer_get_position(buffer); - int cur_size; - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - goto iterfail; - } - - cur_size = buffer_get_position(buffer) - before; - if (cur_size > max_bson_size) { - /* If we've encoded anything send it before raising. */ - if (!empty) { - buffer_update_position(buffer, before); - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe, &options, - to_publish, compress); - if (!result) - goto iterfail; - Py_DECREF(result); - } - _set_document_too_large(cur_size, max_bson_size); - goto iterfail; - } - empty = 0; - - /* We have enough data, send this batch. */ - if (buffer_get_position(buffer) > max_message_size) { - int new_request_id = rand(); - int message_start; - buffer_t new_buffer = buffer_new(); - if (!new_buffer) { - PyErr_NoMemory(); - goto iterfail; - } - message_start = init_insert_buffer(new_buffer, - new_request_id, - flags, - collection_name, - collection_name_length, - compress); - if (message_start == -1) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Copy the overflow encoded document into the new buffer. */ - if (!buffer_write_bytes(new_buffer, - (const char*)buffer_get_buffer(buffer) + before, cur_size)) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Roll back to the beginning of this document. 
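The batch-splitting idea in this removed path: keep encoding into the current buffer, and when the message limit is crossed, carry the overflowing document into a fresh buffer and flush the old one. A minimal sketch of that pattern with hypothetical names, not the actual PyMongo API:

    def split_batches(docs, encode, max_message_size):
        """Yield encoded batches; an overflowing document opens the next batch."""
        buf = bytearray()
        for doc in docs:
            encoded = encode(doc)
            if buf and len(buf) + len(encoded) > max_message_size:
                yield bytes(buf)   # flush everything encoded so far
                buf = bytearray()  # the overflow doc starts a new batch
            buf += encoded
        if buf:
            yield bytes(buf)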
*/ - buffer_update_position(buffer, before); - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe, &options, to_publish, - compress); - - buffer_free(buffer); - buffer = new_buffer; - request_id = new_request_id; - length_location = message_start; - - Py_DECREF(to_publish); - if (!(to_publish = PyList_New(0))) { - goto insertfail; - } - - if (!result) { - PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; - PyObject* OperationFailure; - PyErr_Fetch(&etype, &evalue, &etrace); - OperationFailure = _error("OperationFailure"); - if (OperationFailure) { - if (PyErr_GivenExceptionMatches(etype, OperationFailure)) { - if (!safe || continue_on_error) { - Py_DECREF(OperationFailure); - if (!safe) { - /* We're doing unacknowledged writes and - * continue_on_error is False. Just return. */ - Py_DECREF(etype); - Py_XDECREF(evalue); - Py_XDECREF(etrace); - Py_DECREF(to_publish); - Py_DECREF(iterator); - Py_DECREF(doc); - buffer_free(buffer); - PyMem_Free(collection_name); - Py_RETURN_NONE; - } - /* continue_on_error is True, store the error - * details to re-raise after the final batch */ - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - exc_type = etype; - exc_value = evalue; - exc_trace = etrace; - if (PyList_Append(to_publish, doc) < 0) { - goto iterfail; - } - Py_CLEAR(doc); - continue; - } - } - Py_DECREF(OperationFailure); - } - /* This isn't OperationFailure, we couldn't - * import OperationFailure, or we are doing - * acknowledged writes. Re-raise immediately. */ - PyErr_Restore(etype, evalue, etrace); - goto iterfail; - } else { - Py_DECREF(result); - } - } - if (PyList_Append(to_publish, doc) < 0) { - goto iterfail; - } - Py_CLEAR(doc); - } - Py_DECREF(iterator); - - if (PyErr_Occurred()) { - goto insertfail; - } - - if (empty) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - goto insertfail; - } - - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - - /* Send the last (or only) batch */ - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, safe, &options, to_publish, compress); - - Py_DECREF(to_publish); - PyMem_Free(collection_name); - buffer_free(buffer); - - if (!result) { - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - return NULL; - } else { - Py_DECREF(result); - } - - if (exc_type) { - /* Re-raise any previously stored exception - * due to continue_on_error being True */ - PyErr_Restore(exc_type, exc_value, exc_trace); - return NULL; - } - - Py_RETURN_NONE; - -iterfail: - Py_XDECREF(doc); - Py_DECREF(iterator); -insertfail: - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - Py_XDECREF(to_publish); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; -} - #define _INSERT 0 #define _UPDATE 1 #define _DELETE 2 @@ -1110,7 +351,7 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { static int _batched_op_msg( - unsigned char op, unsigned char check_keys, unsigned char ack, + unsigned char 
op, unsigned char ack, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -1129,34 +370,22 @@ _batched_op_msg( PyObject* iterator = NULL; char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00"; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; } - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); -#if PY_MAJOR_VERSION >= 3 + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); -#else - max_write_batch_size = PyInt_AsLong(max_write_batch_size_obj); -#endif Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; } - max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); -#if PY_MAJOR_VERSION >= 3 + max_message_size_obj = PyObject_GetAttr(ctx, state->_max_message_size_str); max_message_size = PyLong_AsLong(max_message_size_obj); -#else - max_message_size = PyInt_AsLong(max_message_size_obj); -#endif Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { return 0; @@ -1179,9 +408,8 @@ _batched_op_msg( return 0; } /* Save space for size */ - size_location = buffer_save_space(buffer, 4); + size_location = pymongo_buffer_save_space(buffer, 4); if (size_location == -1) { - PyErr_NoMemory(); return 0; } @@ -1194,16 +422,12 @@ _batched_op_msg( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -1229,18 +453,17 @@ _batched_op_msg( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int cur_doc_begin = buffer_get_position(buffer); + int cur_doc_begin = pymongo_buffer_get_position(buffer); int cur_size; int doc_too_large = 0; int unacked_doc_too_large = 0; - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* Does the first document exceed max_message_size? */ - doc_too_large = (idx == 0 && (buffer_get_position(buffer) > max_message_size)); + doc_too_large = (idx == 0 && (pymongo_buffer_get_position(buffer) > max_message_size)); /* When OP_MSG is used unacknowledged we have to check * document size client side or applications won't be notified. * Otherwise we let the server deal with documents that are too large @@ -1268,12 +491,12 @@ _batched_op_msg( goto fail; } /* We have enough data, return this batch. */ - if (buffer_get_position(buffer) > max_message_size) { + if (pymongo_buffer_get_position(buffer) > max_message_size) { /* * Roll the existing buffer back to the beginning * of the last document encoded. 
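For reference, the OP_MSG frame being assembled here is a 16-byte header (messageLength, requestId, responseTo, opCode 2013), an int32 flagBits word (the 0x2 bit, moreToCome, marks the unacknowledged case above), then one or more payload sections. A rough Python sketch of the kind-0 section framing, assuming command_bson is an already-encoded BSON document:

    import struct

    def op_msg_frame(request_id: int, flags: int, command_bson: bytes) -> bytes:
        # flagBits, then one kind-0 section holding the command document.
        body = struct.pack("<i", flags) + b"\x00" + command_bson
        # Header: messageLength, requestId, responseTo, opCode (OP_MSG = 2013).
        return struct.pack("<iiii", 16 + len(body), request_id, 0, 2013) + body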
*/ - buffer_update_position(buffer, cur_doc_begin); + pymongo_buffer_update_position(buffer, cur_doc_begin); Py_CLEAR(doc); break; } @@ -1293,7 +516,7 @@ _batched_op_msg( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - size_location; buffer_write_int32_at_position(buffer, size_location, (int32_t)length); return 1; @@ -1307,25 +530,24 @@ _batched_op_msg( static PyObject* _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; PyObject* command; PyObject* docs; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, - convert_codec_options, &options, - &ctx)) { + if (!(PyArg_ParseTuple(args, "bOObOO", + &op, &command, &docs, &ack, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - if (!(buffer = buffer_new())) { - PyErr_NoMemory(); + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } @@ -1335,7 +557,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -1347,13 +568,13 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { goto fail; } - result = Py_BuildValue(BYTES_FORMAT_STRING "O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + result = Py_BuildValue("y#O", + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -1361,7 +582,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { static PyObject* _cbson_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; int request_id; int position; @@ -1370,24 +590,23 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, - convert_codec_options, &options, - &ctx)) { + if (!(PyArg_ParseTuple(args, "bOObOO", + &op, &command, &docs, &ack, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - if (!(buffer = buffer_new())) { - PyErr_NoMemory(); + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { - PyErr_NoMemory(); + if ((pymongo_buffer_save_space(buffer, 8)) == -1) { goto fail; } if (!buffer_write_bytes(buffer, @@ -1402,7 +621,6 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -1415,16 +633,16 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { } request_id = rand(); - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); buffer_write_int32_at_position(buffer, 0, (int32_t)position); buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); - result = 
Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + result = Py_BuildValue("iy#O", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -1433,7 +651,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { static int _batched_write_command( - char* ns, Py_ssize_t ns_len, unsigned char op, int check_keys, + char* ns, Py_ssize_t ns_len, unsigned char op, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -1453,12 +671,8 @@ _batched_write_command( PyObject* doc = NULL; PyObject* iterator = NULL; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; @@ -1469,12 +683,8 @@ _batched_write_command( */ max_cmd_size = max_bson_size + 16382; - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); -#if PY_MAJOR_VERSION >= 3 + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); -#else - max_write_batch_size = PyInt_AsLong(max_write_batch_size_obj); -#endif Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; @@ -1483,12 +693,8 @@ _batched_write_command( // max_split_size is the size at which to perform a batch split. // Normally this this value is equal to max_bson_size (16MiB). However, // when auto encryption is enabled max_split_size is reduced to 2MiB. - max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); -#if PY_MAJOR_VERSION >= 3 + max_split_size_obj = PyObject_GetAttr(ctx, state->_max_split_size_str); max_split_size = PyLong_AsLong(max_split_size_obj); -#else - max_split_size = PyInt_AsLong(max_split_size_obj); -#endif Py_XDECREF(max_split_size_obj); if (max_split_size == -1) { return 0; @@ -1506,14 +712,14 @@ _batched_write_command( } /* Position of command document length */ - cmd_len_loc = buffer_get_position(buffer); + cmd_len_loc = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, command, 0, &options, 0)) { return 0; } /* Write type byte for array */ - *(buffer_get_buffer(buffer) + (buffer_get_position(buffer) - 1)) = 0x4; + *(pymongo_buffer_get_buffer(buffer) + (pymongo_buffer_get_position(buffer) - 1)) = 0x4; switch (op) { case _INSERT: @@ -1524,16 +730,12 @@ _batched_write_command( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. 
*/ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -1550,9 +752,8 @@ _batched_write_command( } /* Save space for list document */ - lst_len_loc = buffer_save_space(buffer, 4); + lst_len_loc = pymongo_buffer_save_space(buffer, 4); if (lst_len_loc == -1) { - PyErr_NoMemory(); return 0; } @@ -1566,26 +767,28 @@ _batched_write_command( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int sub_doc_begin = buffer_get_position(buffer); + int sub_doc_begin = pymongo_buffer_get_position(buffer); int cur_doc_begin; int cur_size; int enough_data = 0; - char key[16]; - INT2STRING(key, idx); + char key[BUF_SIZE]; + int res = LL2STR(key, (long long)idx); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, "\x03", 1) || !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; } - cur_doc_begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, - check_keys, &options, 1)) { + cur_doc_begin = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } /* We have enough data, return this batch. * max_cmd_size accounts for the two trailing null bytes. */ - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* This single document is too large for the command. */ if (cur_size > max_cmd_size) { if (op == _INSERT) { @@ -1607,13 +810,13 @@ _batched_write_command( goto fail; } enough_data = (idx >= 1 && - (buffer_get_position(buffer) > max_split_size)); + (pymongo_buffer_get_position(buffer) > max_split_size)); if (enough_data) { /* * Roll the existing buffer back to the beginning * of the last document encoded. */ - buffer_update_position(buffer, sub_doc_begin); + pymongo_buffer_update_position(buffer, sub_doc_begin); Py_CLEAR(doc); break; } @@ -1637,7 +840,7 @@ _batched_write_command( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - lst_len_loc - 1; buffer_write_int32_at_position(buffer, lst_len_loc, (int32_t)length); length = position - cmd_len_loc; @@ -1654,25 +857,24 @@ static PyObject* _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; - unsigned char check_keys; Py_ssize_t ns_len; PyObject* command; PyObject* docs; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "et#bOObO&O", "utf-8", - &ns, &ns_len, &op, &command, &docs, &check_keys, - convert_codec_options, &options, - &ctx)) { + if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", + &ns, &ns_len, &op, &command, &docs, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - if (!(buffer = buffer_new())) { - PyErr_NoMemory(); + if (!(buffer = pymongo_buffer_new())) { PyMem_Free(ns); destroy_codec_options(&options); return NULL; @@ -1685,7 +887,6 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { ns, ns_len, op, - check_keys, command, docs, ctx, @@ -1696,108 +897,25 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { goto fail; } - result = Py_BuildValue(BYTES_FORMAT_STRING "O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + result = Py_BuildValue("y#O", + pymongo_buffer_get_buffer(buffer), + 
(Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: PyMem_Free(ns); destroy_codec_options(&options); - buffer_free(buffer); - Py_XDECREF(to_publish); - return result; -} - -static PyObject* -_cbson_batched_write_command(PyObject* self, PyObject* args) { - char *ns = NULL; - unsigned char op; - unsigned char check_keys; - Py_ssize_t ns_len; - int request_id; - int position; - PyObject* command; - PyObject* docs; - PyObject* ctx = NULL; - PyObject* to_publish = NULL; - PyObject* result = NULL; - codec_options_t options; - buffer_t buffer; - struct module_state *state = GETSTATE(self); - - if (!PyArg_ParseTuple(args, "et#bOObO&O", "utf-8", - &ns, &ns_len, &op, &command, &docs, &check_keys, - convert_codec_options, &options, - &ctx)) { - return NULL; - } - if (!(buffer = buffer_new())) { - PyErr_NoMemory(); - PyMem_Free(ns); - destroy_codec_options(&options); - return NULL; - } - /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { - PyErr_NoMemory(); - goto fail; - } - if (!buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00", /* opcode */ - 8)) { - goto fail; - } - if (!(to_publish = PyList_New(0))) { - goto fail; - } - - if (!_batched_write_command( - ns, - ns_len, - op, - check_keys, - command, - docs, - ctx, - to_publish, - options, - buffer, - state)) { - goto fail; - } - - request_id = rand(); - position = buffer_get_position(buffer); - buffer_write_int32_at_position(buffer, 0, (int32_t)position); - buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); - result = Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - to_publish); -fail: - PyMem_Free(ns); - destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } static PyMethodDef _CMessageMethods[] = { - {"_insert_message", _cbson_insert_message, METH_VARARGS, - "Create an insert message to be sent to MongoDB"}, - {"_update_message", _cbson_update_message, METH_VARARGS, - "create an update message to be sent to MongoDB"}, {"_query_message", _cbson_query_message, METH_VARARGS, "create a query message to be sent to MongoDB"}, {"_get_more_message", _cbson_get_more_message, METH_VARARGS, "create a get more message to be sent to MongoDB"}, {"_op_msg", _cbson_op_msg, METH_VARARGS, "create an OP_MSG message to be sent to MongoDB"}, - {"_do_batched_insert", _cbson_do_batched_insert, METH_VARARGS, - "insert a batch of documents, splitting the batch as needed"}, - {"_batched_write_command", _cbson_batched_write_command, METH_VARARGS, - "Create the next batched insert, update, or delete command"}, {"_encode_batched_write_command", _cbson_encode_batched_write_command, METH_VARARGS, "Encode the next batched insert, update, or delete command"}, {"_batched_op_msg", _cbson_batched_op_msg, METH_VARARGS, @@ -1807,15 +925,22 @@ static PyMethodDef _CMessageMethods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 #define INITERROR return NULL static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->_cbson); + Py_VISIT(GETSTATE(m)->_max_bson_size_str); + Py_VISIT(GETSTATE(m)->_max_message_size_str); + Py_VISIT(GETSTATE(m)->_max_split_size_str); + Py_VISIT(GETSTATE(m)->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->_cbson); + Py_CLEAR(GETSTATE(m)->_max_bson_size_str); + 
Py_CLEAR(GETSTATE(m)->_max_message_size_str); + Py_CLEAR(GETSTATE(m)->_max_split_size_str); + Py_CLEAR(GETSTATE(m)->_max_write_batch_size_str); return 0; } @@ -1833,15 +958,11 @@ static struct PyModuleDef moduledef = { PyMODINIT_FUNC PyInit__cmessage(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cmessage(void) -#endif { PyObject *_cbson = NULL; PyObject *c_api_object = NULL; PyObject *m = NULL; + struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some * of its functions @@ -1858,40 +979,32 @@ init_cmessage(void) if (c_api_object == NULL) { goto fail; } -#if PY_VERSION_HEX >= 0x03010000 _cbson_API = (void **)PyCapsule_GetPointer(c_api_object, "_cbson._C_API"); -#else - _cbson_API = (void **)PyCObject_AsVoidPtr(c_api_object); -#endif if (_cbson_API == NULL) { goto fail; } -#if PY_MAJOR_VERSION >= 3 /* Returns a new reference. */ m = PyModule_Create(&moduledef); -#else - /* Returns a borrowed reference. */ - m = Py_InitModule("_cmessage", _CMessageMethods); -#endif if (m == NULL) { goto fail; } - GETSTATE(m)->_cbson = _cbson; + state = GETSTATE(m); + state->_cbson = _cbson; + if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && + (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && + (state->_max_write_batch_size_str = PyUnicode_FromString("max_write_batch_size")) && + (state->_max_split_size_str = PyUnicode_FromString("max_split_size")))) { + goto fail; + } Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 return m; -#else - return; -#endif fail: -#if PY_MAJOR_VERSION >= 3 Py_XDECREF(m); -#endif Py_XDECREF(c_api_object); Py_XDECREF(_cbson); INITERROR; diff --git a/pymongo/_csot.py b/pymongo/_csot.py new file mode 100644 index 0000000000..6fad86f9e0 --- /dev/null +++ b/pymongo/_csot.py @@ -0,0 +1,152 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
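The new pymongo/_csot.py below keeps the client-side operation timeout (CSOT) in context variables. One property worth noting in _TimeoutContext.__enter__ is the min(previous, new) deadline: nesting can only tighten a deadline, never extend it. A small sketch against this internal API (applications should use pymongo.timeout, as the docstring below says):

    import time
    from pymongo import _csot

    with _csot._TimeoutContext(5.0):
        with _csot._TimeoutContext(60.0):
            # The inner 60s request cannot extend the outer 5s budget.
            assert _csot.get_deadline() <= time.monotonic() + 5.0
            assert _csot.get_timeout() == 60.0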
+ +"""Internal helpers for CSOT.""" + +from __future__ import annotations + +import functools +import time +from collections import deque +from contextlib import AbstractContextManager +from contextvars import ContextVar, Token +from typing import Any, Callable, Deque, MutableMapping, Optional, TypeVar, cast + +from pymongo.write_concern import WriteConcern + +TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) +RTT: ContextVar[float] = ContextVar("RTT", default=0.0) +DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) + + +def get_timeout() -> Optional[float]: + return TIMEOUT.get(None) + + +def get_rtt() -> float: + return RTT.get() + + +def get_deadline() -> float: + return DEADLINE.get() + + +def set_rtt(rtt: float) -> None: + RTT.set(rtt) + + +def remaining() -> Optional[float]: + if not get_timeout(): + return None + return DEADLINE.get() - time.monotonic() + + +def clamp_remaining(max_timeout: float) -> float: + """Return the remaining timeout clamped to a max value.""" + timeout = remaining() + if timeout is None: + return max_timeout + return min(timeout, max_timeout) + + +class _TimeoutContext(AbstractContextManager): + """Internal timeout context manager. + + Use :func:`pymongo.timeout` instead:: + + with pymongo.timeout(0.5): + client.test.test.insert_one({}) + """ + + def __init__(self, timeout: Optional[float]): + self._timeout = timeout + self._tokens: Optional[tuple[Token[Optional[float]], Token[float], Token[float]]] = None + + def __enter__(self) -> _TimeoutContext: + timeout_token = TIMEOUT.set(self._timeout) + prev_deadline = DEADLINE.get() + next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") + deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) + rtt_token = RTT.set(0.0) + self._tokens = (timeout_token, deadline_token, rtt_token) + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + if self._tokens: + timeout_token, deadline_token, rtt_token = self._tokens + TIMEOUT.reset(timeout_token) + DEADLINE.reset(deadline_token) + RTT.reset(rtt_token) + + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def apply(func: F) -> F: + """Apply the client's timeoutMS to this operation.""" + + @functools.wraps(func) + def csot_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) + + return cast(F, csot_wrapper) + + +def apply_write_concern( + cmd: MutableMapping[str, Any], write_concern: Optional[WriteConcern] +) -> None: + """Apply the given write concern to a command.""" + if not write_concern or write_concern.is_server_default: + return + wc = write_concern.document + if get_timeout() is not None: + wc.pop("wtimeout", None) + if wc: + cmd["writeConcern"] = wc + + +_MAX_RTT_SAMPLES: int = 10 +_MIN_RTT_SAMPLES: int = 2 + + +class MovingMinimum: + """Tracks a minimum RTT within the last 10 RTT samples.""" + + samples: Deque[float] + + def __init__(self) -> None: + self.samples = deque(maxlen=_MAX_RTT_SAMPLES) + + def add_sample(self, sample: float) -> None: + if sample < 0: + # Likely system time change while waiting for hello response + # and not using time.monotonic. Ignore it, the next one will + # probably be valid. 
+ return + self.samples.append(sample) + + def get(self) -> float: + """Get the min, or 0.0 if there aren't enough samples yet.""" + if len(self.samples) >= _MIN_RTT_SAMPLES: + return min(self.samples) + return 0.0 + + def reset(self) -> None: + self.samples.clear() diff --git a/pymongo/_version.py b/pymongo/_version.py new file mode 100644 index 0000000000..0ce5d76ff0 --- /dev/null +++ b/pymongo/_version.py @@ -0,0 +1,30 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Current version of PyMongo.""" +from __future__ import annotations + +from typing import Tuple, Union + +version_tuple: Tuple[Union[int, str], ...] = (4, 6, 3) + + +def get_version_string() -> str: + if isinstance(version_tuple[-1], str): + return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] + return ".".join(map(str, version_tuple)) + + +__version__: str = get_version_string() +version = __version__ diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 7383a35111..ef6af1092e 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -13,43 +13,75 @@ # permissions and limitations under the License. """Perform aggregation operations on a collection or database.""" +from __future__ import annotations -from bson.son import SON +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union +from bson.son import SON from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.command_cursor import CommandCursor + from pymongo.database import Database + from pymongo.pool import Connection + from pymongo.read_preferences import _ServerMode + from pymongo.server import Server + from pymongo.typings import _DocumentType, _Pipeline -class _AggregationCommand(object): + +class _AggregationCommand: """The internal abstract base class for aggregation cursors. Should not be called directly by application developers. Use :meth:`pymongo.collection.Collection.aggregate`, or :meth:`pymongo.database.Database.aggregate` instead. """ - def __init__(self, target, cursor_class, pipeline, options, - explicit_session, user_fields=None, result_processor=None): + + def __init__( + self, + target: Union[Database, Collection], + cursor_class: type[CommandCursor], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + explicit_session: bool, + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], Connection], None]] = None, + comment: Any = None, + ) -> None: if "explain" in options: - raise ConfigurationError("The explain option is not supported. 
" - "Use Database.command instead.") + raise ConfigurationError( + "The explain option is not supported. Use Database.command instead." + ) self._target = target - common.validate_list('pipeline', pipeline) + pipeline = common.validate_list("pipeline", pipeline) self._pipeline = pipeline self._performs_write = False if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): self._performs_write = True - common.validate_is_mapping('options', options) + common.validate_is_mapping("options", options) + if let is not None: + common.validate_is_mapping("let", let) + options["let"] = let + if comment is not None: + options["comment"] = comment + self._options = options # This is the batchSize that will be used for setting the initial # batchSize for the cursor, as well as the subsequent getMores. self._batch_size = common.validate_non_negative_integer_or_none( - "batchSize", self._options.pop("batchSize", None)) + "batchSize", self._options.pop("batchSize", None) + ) # If the cursor option is already specified, avoid overriding it. self._options.setdefault("cursor", {}) @@ -63,63 +95,59 @@ def __init__(self, target, cursor_class, pipeline, options, self._user_fields = user_fields self._result_processor = result_processor - self._collation = validate_collation_or_none( - options.pop('collation', None)) + self._collation = validate_collation_or_none(options.pop("collation", None)) - self._max_await_time_ms = options.pop('maxAwaitTimeMS', None) + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) + self._write_preference: Optional[_AggWritePref] = None @property - def _aggregation_target(self): + def _aggregation_target(self) -> Union[str, int]: """The argument to pass to the aggregate command.""" raise NotImplementedError @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: """The namespace in which the aggregate command is run.""" raise NotImplementedError - @property - def _cursor_collection(self, cursor_doc): + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" raise NotImplementedError @property - def _database(self): + def _database(self) -> Database: """The database against which the aggregation command is run.""" raise NotImplementedError - @staticmethod - def _check_compat(sock_info): - """Check whether the server version in-use supports aggregation.""" - pass - - def _process_result(self, result, session, server, sock_info, slave_ok): - if self._result_processor: - self._result_processor( - result, session, server, sock_info, slave_ok) - - def get_read_preference(self, session): - if self._performs_write: - return ReadPreference.PRIMARY - return self._target._read_preference_for(session) - - def get_cursor(self, session, server, sock_info, slave_ok): - # Ensure command compatibility. - self._check_compat(sock_info) - + def get_read_preference( + self, session: Optional[ClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) # type: ignore[assignment] + return pref + + def get_cursor( + self, + session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[_DocumentType]: # Serialize command. 
- cmd = SON([("aggregate", self._aggregation_target), - ("pipeline", self._pipeline)]) + cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) cmd.update(self._options) # Apply this target's read concern if: # readConcern has not been specified as a kwarg and either # - server version is >= 4.2 or # - server version is >= 3.2 and pipeline doesn't use $out - if (('readConcern' not in cmd) and - ((sock_info.max_wire_version >= 4 and - not self._performs_write) or - (sock_info.max_wire_version >= 8))): + if ("readConcern" not in cmd) and ( + not self._performs_write or (conn.max_wire_version >= 8) + ): read_concern = self._target.read_concern else: read_concern = None @@ -127,17 +155,16 @@ def get_cursor(self, session, server, sock_info, slave_ok): # Apply this target's write concern if: # writeConcern has not been specified as a kwarg and pipeline doesn't # perform a write operation - if 'writeConcern' not in cmd and self._performs_write: + if "writeConcern" not in cmd and self._performs_write: write_concern = self._target._write_concern_for(session) else: write_concern = None # Run command. - result = sock_info.command( + result = conn.command( self._database.name, cmd, - slave_ok, - self.get_read_preference(session), + read_preference, self._target.codec_options, parse_write_concern_error=True, read_concern=read_concern, @@ -145,15 +172,17 @@ def get_cursor(self, session, server, sock_info, slave_ok): collation=self._collation, session=session, client=self._database.client, - user_fields=self._user_fields) + user_fields=self._user_fields, + ) - self._process_result(result, session, server, sock_info, slave_ok) + if self._result_processor: + self._result_processor(result, conn) # Extract cursor from result or mock/fake one if necessary. - if 'cursor' in result: - cursor = result['cursor'] + if "cursor" in result: + cursor = result["cursor"] else: - # Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor. + # Unacknowledged $out/$merge write. Fake a cursor. cursor = { "id": 0, "firstBatch": result.get("result", []), @@ -161,75 +190,68 @@ def get_cursor(self, session, server, sock_info, slave_ok): } # Create and return cursor instance. - return self._cursor_class( - self._cursor_collection(cursor), cursor, sock_info.address, + cmd_cursor = self._cursor_class( + self._cursor_collection(cursor), + cursor, + conn.address, batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, - session=session, explicit_session=self._explicit_session) + session=session, + explicit_session=self._explicit_session, + comment=self._options.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor class _CollectionAggregationCommand(_AggregationCommand): - def __init__(self, *args, **kwargs): - # Pop additional option and initialize parent class. - use_cursor = kwargs.pop("use_cursor", True) - super(_CollectionAggregationCommand, self).__init__(*args, **kwargs) - - # Remove the cursor document if the user has set use_cursor to False. 
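Putting get_cursor above together: the command document is the aggregation target plus the pipeline, with the remaining options, including the mandatory cursor sub-document, merged on top. Roughly, with illustrative values:

    from bson.son import SON

    cmd = SON([("aggregate", "coll"), ("pipeline", [{"$match": {"x": 1}}])])
    cmd.update({"cursor": {"batchSize": 100}, "comment": "example"})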
- self._use_cursor = use_cursor - if not self._use_cursor: - self._options.pop("cursor", None) + _target: Collection @property - def _aggregation_target(self): + def _aggregation_target(self) -> str: return self._target.name @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: return self._target.full_name - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" return self._target @property - def _database(self): + def _database(self) -> Database: return self._target.database class _CollectionRawAggregationCommand(_CollectionAggregationCommand): - def __init__(self, *args, **kwargs): - super(_CollectionRawAggregationCommand, self).__init__(*args, **kwargs) + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. - if self._use_cursor and not self._performs_write: + if not self._performs_write: self._options["cursor"]["batchSize"] = 0 class _DatabaseAggregationCommand(_AggregationCommand): + _target: Database + @property - def _aggregation_target(self): + def _aggregation_target(self) -> int: return 1 @property - def _cursor_namespace(self): - return "%s.$cmd.aggregate" % (self._target.name,) + def _cursor_namespace(self) -> str: + return f"{self._target.name}.$cmd.aggregate" @property - def _database(self): + def _database(self) -> Database: return self._target - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" # Collection level aggregate may not always return the "ns" field # according to our MockupDB tests. Let's handle that case for db level # aggregate too by defaulting to the .$cmd.aggregate namespace. _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) return self._database[collname] - - @staticmethod - def _check_compat(sock_info): - # Older server version don't raise a descriptive error, so we raise - # one instead. - if not sock_info.max_wire_version >= 6: - err_msg = "Database.aggregate() is only supported on MongoDB 3.6+." - raise ConfigurationError(err_msg) diff --git a/pymongo/auth.py b/pymongo/auth.py index fef4386f19..58fc36d051 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -13,23 +13,45 @@ # limitations under the License. 
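One detail from _DatabaseAggregationCommand._cursor_collection above: the cursor's "ns" is split on the first dot only, so the $cmd.aggregate pseudo-collection (and any dotted collection name) survives intact:

    ns = "mydb.$cmd.aggregate"
    db_name, collname = ns.split(".", 1)
    assert (db_name, collname) == ("mydb", "$cmd.aggregate")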
"""Authentication helpers.""" +from __future__ import annotations import functools import hashlib import hmac import os import socket +import typing +from base64 import standard_b64decode, standard_b64encode +from collections import namedtuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote -try: - from urllib import quote -except ImportError: - from urllib.parse import quote +from bson.binary import Binary +from bson.son import SON +from pymongo.auth_aws import _authenticate_aws +from pymongo.auth_oidc import _authenticate_oidc, _get_authenticator, _OIDCProperties +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep + +if TYPE_CHECKING: + from pymongo.hello import Hello + from pymongo.pool import Connection HAVE_KERBEROS = True _USE_PRINCIPAL = False try: import winkerberos as kerberos - if tuple(map(int, kerberos.__version__.split('.')[:2])) >= (0, 5): + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): _USE_PRINCIPAL = True except ImportError: try: @@ -37,208 +59,180 @@ except ImportError: HAVE_KERBEROS = False -from base64 import standard_b64decode, standard_b64encode -from collections import namedtuple - -from bson.binary import Binary -from bson.py3compat import string_type, _unicode, PY3 -from bson.son import SON -from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.saslprep import saslprep - MECHANISMS = frozenset( - ['GSSAPI', - 'MONGODB-CR', - 'MONGODB-X509', - 'PLAIN', - 'SCRAM-SHA-1', - 'SCRAM-SHA-256', - 'DEFAULT']) + [ + "GSSAPI", + "MONGODB-CR", + "MONGODB-OIDC", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) """The authentication mechanisms supported by PyMongo.""" -class _Cache(object): +class _Cache: __slots__ = ("data",) - _hash_val = hash('_Cache') + _hash_val = hash("_Cache") - def __init__(self): + def __init__(self) -> None: self.data = None - def __eq__(self, other): + def __eq__(self, other: object) -> bool: # Two instances must always compare equal. if isinstance(other, _Cache): return True return NotImplemented - def __ne__(self, other): + def __ne__(self, other: object) -> bool: if isinstance(other, _Cache): return False return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return self._hash_val - MongoCredential = namedtuple( - 'MongoCredential', - ['mechanism', - 'source', - 'username', - 'password', - 'mechanism_properties', - 'cache']) + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) """A hashable namedtuple of values used for authentication.""" -GSSAPIProperties = namedtuple('GSSAPIProperties', - ['service_name', - 'canonicalize_host_name', - 'service_realm']) +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] +) """Mechanism properties for GSSAPI authentication.""" -def _build_credentials_tuple(mech, source, user, passwd, extra, database): - """Build and return a mechanism specific credentials tuple. - """ - if mech != 'MONGODB-X509' and user is None: - raise ConfigurationError("%s requires a username." 
% (mech,)) - if mech == 'GSSAPI': - if source is not None and source != '$external': - raise ValueError( - "authentication source must be $external or None for GSSAPI") - properties = extra.get('authmechanismproperties', {}) - service_name = properties.get('SERVICE_NAME', 'mongodb') - canonicalize = properties.get('CANONICALIZE_HOST_NAME', False) - service_realm = properties.get('SERVICE_REALM') - props = GSSAPIProperties(service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm) +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) +"""Mechanism properties for MONGODB-AWS authentication.""" + + +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: str, + passwd: str, + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: + raise ConfigurationError(f"{mech} requires a username.") + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + canonicalize = properties.get("CANONICALIZE_HOST_NAME", False) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + ) # Source is always $external. - return MongoCredential(mech, '$external', user, passwd, props, None) - elif mech == 'MONGODB-X509': + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": if passwd is not None: + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for MONGODB-X509") + # Source is always $external, user can be None. + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": + if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": + raise ConfigurationError( + "authentication source must be $external or None for MONGODB-AWS" + ) + + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") + aws_props = _AWSProperties(aws_session_token=aws_session_token) + # user can be None for temporary link-local EC2 credentials. + return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "MONGODB-OIDC": + properties = extra.get("authmechanismproperties", {}) + request_token_callback = properties.get("request_token_callback") + provider_name = properties.get("PROVIDER_NAME", "") + default_allowed = [ + "*.mongodb.net", + "*.mongodb-dev.net", + "*.mongodb-qa.net", + "*.mongodbgov.net", + "localhost", + "127.0.0.1", + "::1", + ] + allowed_hosts = properties.get("allowed_hosts", default_allowed) + if not request_token_callback and provider_name != "aws": raise ConfigurationError( - "Passwords are not supported by MONGODB-X509") - if source is not None and source != '$external': - raise ValueError( - "authentication source must be " - "$external or None for MONGODB-X509") - # user can be None. 
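For orientation, the mechanism properties consumed by _build_credentials_tuple normally arrive through the connection string's authMechanismProperties option. An illustrative (not exhaustive) GSSAPI example, with a percent-encoded principal:

    uri = (
        "mongodb://user%40EXAMPLE.COM@host/?authMechanism=GSSAPI"
        "&authMechanismProperties=SERVICE_NAME:mongodb"
    )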
-        return MongoCredential(mech, '$external', user, None, None, None)
-    elif mech == 'PLAIN':
-        source_database = source or database or '$external'
+                "authentication with MONGODB-OIDC requires providing a request_token_callback or a provider_name of 'aws'"
+            )
+        oidc_props = _OIDCProperties(
+            request_token_callback=request_token_callback,
+            provider_name=provider_name,
+            allowed_hosts=allowed_hosts,
+        )
+        return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache())
+
+    elif mech == "PLAIN":
+        source_database = source or database or "$external"
         return MongoCredential(mech, source_database, user, passwd, None, None)
     else:
-        source_database = source or database or 'admin'
+        source_database = source or database or "admin"
         if passwd is None:
             raise ConfigurationError("A password is required.")
-        return MongoCredential(
-            mech, source_database, user, passwd, None, _Cache())
-
-
-if PY3:
-    def _xor(fir, sec):
-        """XOR two byte strings together (python 3.x)."""
-        return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)])
-
-
-    _from_bytes = int.from_bytes
-    _to_bytes = int.to_bytes
-else:
-    from binascii import (hexlify as _hexlify,
-                          unhexlify as _unhexlify)
-
-
-    def _xor(fir, sec):
-        """XOR two byte strings together (python 2.x)."""
-        return b"".join([chr(ord(x) ^ ord(y)) for x, y in zip(fir, sec)])
-
+        return MongoCredential(mech, source_database, user, passwd, None, _Cache())

-    def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify):
-        """An implementation of int.from_bytes for python 2.x."""
-        return _int(_hexlify(value), 16)

+def _xor(fir: bytes, sec: bytes) -> bytes:
+    """XOR two byte strings together."""
+    return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)])

-    def _to_bytes(value, length, dummy, _unhexlify=_unhexlify):
-        """An implementation of int.to_bytes for python 2.x."""
-        fmt = '%%0%dx' % (2 * length,)
-        return _unhexlify(fmt % value)

+def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]:
+    """Split a scram response into key, value pairs."""
+    return dict(
+        typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1))
+        for item in response.split(b",")
+    )

-try:
-    # The fastest option, if it's been compiled to use OpenSSL's HMAC.
-    from backports.pbkdf2 import pbkdf2_hmac as _hi
-except ImportError:
-    try:
-        # Python 2.7.8+, or Python 3.4+.
-        from hashlib import pbkdf2_hmac as _hi
-    except ImportError:
-
-        def _hi(hash_name, data, salt, iterations):
-            """A simple implementation of PBKDF2-HMAC."""
-            mac = hmac.HMAC(data, None, getattr(hashlib, hash_name))
-
-            def _digest(msg, mac=mac):
-                """Get a digest for msg."""
-                _mac = mac.copy()
-                _mac.update(msg)
-                return _mac.digest()
-
-            from_bytes = _from_bytes
-            to_bytes = _to_bytes
-            _u1 = _digest(salt + b'\x00\x00\x00\x01')
-            _ui = from_bytes(_u1, 'big')
-            for _ in range(iterations - 1):
-                _u1 = _digest(_u1)
-                _ui ^= from_bytes(_u1, 'big')
-            return to_bytes(_ui, mac.digest_size, 'big')

+def _authenticate_scram_start(
+    credentials: MongoCredential, mechanism: str
+) -> tuple[bytes, bytes, MutableMapping[str, Any]]:
+    username = credentials.username
+    user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
+    nonce = standard_b64encode(os.urandom(32))
+    first_bare = b"n=" + user + b",r=" + nonce

-try:
-    from hmac import compare_digest
-except ImportError:
-    if PY3:
-        def _xor_bytes(a, b):
-            return a ^ b
-    else:
-        def _xor_bytes(a, b, _ord=ord):
-            return _ord(a) ^ _ord(b)
-
-    # Python 2.x < 2.7.7
-    # Note: This method is intentionally obtuse to prevent timing attacks.
Do - # not refactor it! - # References: - # - http://bugs.python.org/issue14532 - # - http://bugs.python.org/issue14955 - # - http://bugs.python.org/issue15061 - def compare_digest(a, b, _xor_bytes=_xor_bytes): - left = None - right = b - if len(a) == len(b): - left = a - result = 0 - if len(a) != len(b): - left = b - result = 1 - - for x, y in zip(left, right): - result |= _xor_bytes(x, y) - return result == 0 - - -def _parse_scram_response(response): - """Split a scram response into key, value pairs.""" - return dict(item.split(b"=", 1) for item in response.split(b",")) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", mechanism), + ("payload", Binary(b"n,," + first_bare)), + ("autoAuthorize", 1), + ("options", {"skipEmptyExchange": True}), + ] + ) + return nonce, first_bare, cmd -def _authenticate_scram(credentials, sock_info, mechanism): +def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: """Authenticate using SCRAM.""" - username = credentials.username - if mechanism == 'SCRAM-SHA-256': + if mechanism == "SCRAM-SHA-256": digest = "sha256" digestmod = hashlib.sha256 data = saslprep(credentials.password).encode("utf-8") @@ -252,23 +246,24 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Make local _hmac = hmac.HMAC - user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") - nonce = standard_b64encode(os.urandom(32)) - first_bare = b"n=" + user + b",r=" + nonce - - cmd = SON([('saslStart', 1), - ('mechanism', mechanism), - ('payload', Binary(b"n,," + first_bare)), - ('autoAuthorize', 1)]) - res = sock_info.command(source, cmd) + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = conn.command(source, cmd) - server_first = res['payload'] + assert res is not None + server_first = res["payload"] parsed = _parse_scram_response(server_first) - iterations = int(parsed[b'i']) + iterations = int(parsed[b"i"]) if iterations < 4096: raise OperationFailure("Server returned an invalid iteration count.") - salt = parsed[b's'] - rnonce = parsed[b'r'] + salt = parsed[b"s"] + rnonce = parsed[b"r"] if not rnonce.startswith(nonce): raise OperationFailure("Server returned an invalid nonce.") @@ -281,8 +276,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Salt and / or iterations could change for a number of different # reasons. Either changing invalidates the cache. 
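For reference, the proof that these cached keys feed into is the standard RFC 5802 derivation. A minimal standard-library sketch for SCRAM-SHA-256 (the function name and arguments are illustrative, not PyMongo's API):

    import hashlib
    import hmac

    def scram_client_proof(password: bytes, salt: bytes, iterations: int,
                           auth_message: bytes) -> bytes:
        # SaltedPassword := Hi(Normalize(password), salt, i)
        salted = hashlib.pbkdf2_hmac("sha256", password, salt, iterations)
        # ClientKey := HMAC(SaltedPassword, "Client Key")
        client_key = hmac.new(salted, b"Client Key", hashlib.sha256).digest()
        # ClientSignature := HMAC(H(ClientKey), AuthMessage)
        stored_key = hashlib.sha256(client_key).digest()
        client_sig = hmac.new(stored_key, auth_message, hashlib.sha256).digest()
        # ClientProof := ClientKey XOR ClientSignature
        return bytes(a ^ b for a, b in zip(client_key, client_sig))

Caching client_key and server_key per credential lets each new pooled connection skip the expensive PBKDF2 step, which is why the check below only recomputes them when the server's salt or iteration count changes.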
if not client_key or salt != csalt or iterations != citerations: - salted_pass = _hi( - digest, data, standard_b64decode(salt), iterations) + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() cache.data = (client_key, server_key, salt, iterations) @@ -292,63 +286,81 @@ def _authenticate_scram(credentials, sock_info, mechanism): client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) client_final = b",".join((without_proof, client_proof)) - server_sig = standard_b64encode( - _hmac(server_key, auth_msg, digestmod).digest()) + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(client_final))]) - res = sock_info.command(source, cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(client_final)), + ] + ) + res = conn.command(source, cmd) - parsed = _parse_scram_response(res['payload']) - if not compare_digest(parsed[b'v'], server_sig): + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): raise OperationFailure("Server returned an invalid signature.") - # Depending on how it's configured, Cyrus SASL (which the server uses) - # requires a third empty challenge. - if not res['done']: - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(b''))]) - res = sock_info.command(source, cmd) - if not res['done']: - raise OperationFailure('SASL conversation failed to complete.') - - -def _password_digest(username, password): - """Get a password digest to use for authentication. - """ - if not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(b"")), + ] + ) + res = conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") if len(password) == 0: raise ValueError("password can't be empty") - if not isinstance(username, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(username, str): + raise TypeError("username must be an instance of str") - md5hash = hashlib.md5() - data = "%s:mongo:%s" % (username, password) - md5hash.update(data.encode('utf-8')) - return _unicode(md5hash.hexdigest()) + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() -def _auth_key(nonce, username, password): - """Get an auth key to use for authentication. 
- """ +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" digest = _password_digest(username, password) - md5hash = hashlib.md5() - data = "%s%s%s" % (nonce, username, digest) - md5hash.update(data.encode('utf-8')) - return _unicode(md5hash.hexdigest()) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _canonicalize_hostname(hostname: str) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( + hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME + )[0] + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + return name[0].lower() -def _authenticate_gssapi(credentials, sock_info): - """Authenticate using GSSAPI. - """ + +def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: - raise ConfigurationError('The "kerberos" module must be ' - 'installed to use GSSAPI authentication.') + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' + ) try: username = credentials.username @@ -356,12 +368,12 @@ def _authenticate_gssapi(credentials, sock_info): props = credentials.mechanism_properties # Starting here and continuing through the while loop below - establish # the security context. See RFC 4752, Section 3.1, first paragraph. - host = sock_info.address[0] + host = conn.address[0] if props.canonicalize_host_name: - host = socket.getfqdn(host) - service = props.service_name + '@' + host + host = _canonicalize_hostname(host) + service = props.service_name + "@" + host if props.service_realm is not None: - service = service + '@' + props.service_realm + service = service + "@" + props.service_realm if password is not None: if _USE_PRINCIPAL: @@ -370,200 +382,244 @@ def _authenticate_gssapi(credentials, sock_info): # by WinKerberos) doesn't support +. principal = ":".join((quote(username), quote(password))) result, ctx = kerberos.authGSSClientInit( - service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) else: - if '@' in username: - user, domain = username.split('@', 1) + if "@" in username: + user, domain = username.split("@", 1) else: user, domain = username, None result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG, - user=user, domain=domain, password=password) + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) else: - result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure('Kerberos context failed to initialize.') + raise OperationFailure("Kerberos context failed to initialize.") try: # pykerberos uses a weird mix of exceptions and return values # to indicate errors. # 0 == continue, 1 == complete, -1 == error # Only authGSSClientStep can return 0. 
- if kerberos.authGSSClientStep(ctx, '') != 0: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") # Start a SASL conversation with mongod/s # Note: pykerberos deals with base64 encoded byte strings. # Since mongo accepts base64 strings as the payload we don't # have to use bson.binary.Binary. payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslStart', 1), - ('mechanism', 'GSSAPI'), - ('payload', payload), - ('autoAuthorize', 1)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "GSSAPI"), + ("payload", payload), + ("autoAuthorize", 1), + ] + ) + response = conn.command("$external", cmd) # Limit how many times we loop to catch protocol / library issues for _ in range(10): - result = kerberos.authGSSClientStep(ctx, - str(response['payload'])) + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) if result == -1: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + raise OperationFailure("Unknown kerberos failure in step function.") - payload = kerberos.authGSSClientResponse(ctx) or '' + payload = kerberos.authGSSClientResponse(ctx) or "" - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + response = conn.command("$external", cmd) if result == kerberos.AUTH_GSS_COMPLETE: break else: - raise OperationFailure('Kerberos ' - 'authentication failed to complete.') + raise OperationFailure("Kerberos authentication failed to complete.") # Once the security context is established actually authenticate. # See RFC 4752, Section 3.1, last two paragraphs. 
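        # The server's final challenge is a wrapped message advertising its
        # supported security layers and maximum buffer size; the client
        # unwraps it, then wraps and returns the authorization identity
        # (the username) to finish the conversation.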
- if kerberos.authGSSClientUnwrap(ctx, - str(response['payload'])) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Unwrap step.') + if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") - if kerberos.authGSSClientWrap(ctx, - kerberos.authGSSClientResponse(ctx), - username) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Wrap step.') + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + conn.command("$external", cmd) finally: kerberos.authGSSClientClean(ctx) except kerberos.KrbError as exc: - raise OperationFailure(str(exc)) + raise OperationFailure(str(exc)) from None -def _authenticate_plain(credentials, sock_info): - """Authenticate using SASL PLAIN (RFC 4616) - """ +def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" source = credentials.source username = credentials.username password = credentials.password - payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8') - cmd = SON([('saslStart', 1), - ('mechanism', 'PLAIN'), - ('payload', Binary(payload)), - ('autoAuthorize', 1)]) - sock_info.command(source, cmd) - - -def _authenticate_cram_md5(credentials, sock_info): - """Authenticate using CRAM-MD5 (RFC 2195) - """ - source = credentials.source - username = credentials.username - password = credentials.password - # The password used as the mac key is the - # same as what we use for MONGODB-CR - passwd = _password_digest(username, password) - cmd = SON([('saslStart', 1), - ('mechanism', 'CRAM-MD5'), - ('payload', Binary(b'')), - ('autoAuthorize', 1)]) - response = sock_info.command(source, cmd) - # MD5 as implicit default digest for digestmod is deprecated - # in python 3.4 - mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=hashlib.md5) - mac.update(response['payload']) - challenge = username.encode('utf-8') + b' ' + mac.hexdigest().encode('utf-8') - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', Binary(challenge))]) - sock_info.command(source, cmd) - - -def _authenticate_x509(credentials, sock_info): - """Authenticate using MONGODB-X509. - """ - query = SON([('authenticate', 1), - ('mechanism', 'MONGODB-X509')]) - if credentials.username is not None: - query['user'] = credentials.username - elif sock_info.max_wire_version < 5: - raise ConfigurationError( - "A username is required for MONGODB-X509 authentication " - "when connected to MongoDB versions older than 3.4.") - sock_info.command('$external', query) - - -def _authenticate_mongo_cr(credentials, sock_info): - """Authenticate using MONGODB-CR. 
- """ + payload = (f"\x00{username}\x00{password}").encode() + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "PLAIN"), + ("payload", Binary(payload)), + ("autoAuthorize", 1), + ] + ) + conn.command(source, cmd) + + +def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + conn.command("$external", cmd) + + +def _authenticate_mongo_cr(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-CR.""" source = credentials.source username = credentials.username password = credentials.password # Get a nonce - response = sock_info.command(source, {'getnonce': 1}) - nonce = response['nonce'] + response = conn.command(source, {"getnonce": 1}) + nonce = response["nonce"] key = _auth_key(nonce, username, password) # Actually authenticate - query = SON([('authenticate', 1), - ('user', username), - ('nonce', nonce), - ('key', key)]) - sock_info.command(source, query) - - -def _authenticate_default(credentials, sock_info): - if sock_info.max_wire_version >= 7: - source = credentials.source - cmd = SON([ - ('ismaster', 1), - ('saslSupportedMechs', source + '.' + credentials.username)]) - mechs = sock_info.command( - source, cmd, publish_events=False).get('saslSupportedMechs', []) - if 'SCRAM-SHA-256' in mechs: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-256') + query = SON([("authenticate", 1), ("user", username), ("nonce", nonce), ("key", key)]) + conn.command(source, query) + + +def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." 
+ credentials.username + mechs = conn.command(source, cmd, publish_events=False).get("saslSupportedMechs", []) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") else: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') - elif sock_info.max_wire_version >= 3: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") else: - return _authenticate_mongo_cr(credentials, sock_info) - - -_AUTH_MAP = { - 'CRAM-MD5': _authenticate_cram_md5, - 'GSSAPI': _authenticate_gssapi, - 'MONGODB-CR': _authenticate_mongo_cr, - 'MONGODB-X509': _authenticate_x509, - 'PLAIN': _authenticate_plain, - 'SCRAM-SHA-1': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-1'), - 'SCRAM-SHA-256': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-256'), - 'DEFAULT': _authenticate_default, + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., None]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-CR": _authenticate_mongo_cr, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, } -def authenticate(credentials, sock_info): - """Authenticate sock_info.""" - mechanism = credentials.mechanism - auth_func = _AUTH_MAP.get(mechanism) - auth_func(credentials, sock_info) +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. 
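+        # _authenticate_scram reads these back through conn.auth_ctx when the
+        # hello response carries speculativeAuthenticate, so the SASL
+        # conversation can resume at the client-final message.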
+ self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.auth_start_cmd(False) + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} -def logout(source, sock_info): - """Log out from a database.""" - sock_info.command(source, {'logout': 1}) +def authenticate( + credentials: MongoCredential, conn: Connection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, conn, reauthenticate) + else: + auth_func(credentials, conn) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py new file mode 100644 index 0000000000..81f30c7ae3 --- /dev/null +++ b/pymongo/auth_aws.py @@ -0,0 +1,119 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" +from __future__ import annotations + +try: + import pymongo_auth_aws + from pymongo_auth_aws import AwsCredential, AwsSaslContext, PyMongoAuthAwsError + + _HAVE_MONGODB_AWS = True +except ImportError: + + class AwsSaslContext: # type: ignore + def __init__(self, credentials: MongoCredential): + pass + + _HAVE_MONGODB_AWS = False + +try: + from pymongo_auth_aws.auth import set_cached_credentials, set_use_cached_credentials + + # Enable credential caching. 
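+    # Reusing cached temporary credentials (for example, ones obtained from
+    # the EC2/ECS metadata endpoints) avoids refetching them on every
+    # authentication attempt; _authenticate_aws clears the cache on failure.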
+ set_use_cached_credentials(True) +except ImportError: + + def set_cached_credentials(_creds: Optional[AwsCredential]) -> None: + pass + + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Type + +import bson +from bson.binary import Binary +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from bson.typings import _ReadableBuffer + from pymongo.auth import MongoCredential + from pymongo.pool import Connection + + +class _AwsSaslContext(AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + + +def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-AWS.""" + if not _HAVE_MONGODB_AWS: + raise ConfigurationError( + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'" + ) + + if conn.max_wire_version < 9: + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + try: + ctx = _AwsSaslContext( + AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) + client_payload = ctx.step(None) + client_first = SON( + [("saslStart", 1), ("mechanism", "MONGODB-AWS"), ("payload", client_payload)] + ) + server_first = conn.command("$external", client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res["payload"]) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", server_first["conversationId"]), + ("payload", client_payload), + ] + ) + res = conn.command("$external", cmd) + if res["done"]: + # SASL complete. + break + except PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure( + f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})" + ) from None + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..ad9223809e --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,276 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
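The new pymongo/auth_oidc.py module that begins here implements the MONGODB-OIDC human flow. On the application side it is driven by a request_token_callback supplied through authMechanismProperties; a minimal sketch of a conforming setup, where the URI and the my_fetch_token helper are placeholders rather than part of PyMongo:

    from pymongo import MongoClient

    def request_token_callback(idp_info, context):
        # idp_info is the IdP document returned by the server's principal
        # step; context carries timeout_seconds, version, and any cached
        # refresh_token. How the token is obtained is application-specific.
        token = my_fetch_token(idp_info["issuer"], context)  # placeholder
        return {"access_token": token, "expires_in_seconds": 3600}

    client = MongoClient(
        "mongodb://localhost:27017",
        authMechanism="MONGODB-OIDC",
        authMechanismProperties={"request_token_callback": request_token_callback},
    )

The returned mapping must include access_token and may add only refresh_token and expires_in_seconds; validate_request_token_response below rejects any other field.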
+ +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import threading +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Callable, Mapping, MutableMapping, Optional + +import bson +from bson.binary import Binary +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from pymongo.auth import MongoCredential + from pymongo.pool import Connection + + +@dataclass +class _OIDCProperties: + request_token_callback: Optional[Callable[..., dict]] + provider_name: Optional[str] + allowed_hosts: list[str] + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CALLBACK_VERSION = 1 + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if not properties.provider_name: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[dict] = field(default=None) + token_gen_id: int = field(default=0) + lock: threading.Lock = field(default_factory=threading.Lock) + + def get_current_token(self, use_callback: bool = True) -> Optional[str]: + properties = self.properties + + # TODO: DRIVERS-2672, handle machine callback here as well. + cb = properties.request_token_callback if use_callback else None + cb_type = "human" + + prev_token = self.access_token + if prev_token: + return prev_token + + if not use_callback and not prev_token: + return None + + if not prev_token and cb is not None: + with self.lock: + # See if the token was changed while we were waiting for the + # lock. + new_token = self.access_token + if new_token != prev_token: + return new_token + + # TODO: DRIVERS-2672 handle machine callback here. + if cb_type == "human": + context = { + "timeout_seconds": CALLBACK_TIMEOUT_SECONDS, + "version": CALLBACK_VERSION, + "refresh_token": self.refresh_token, + } + resp = cb(self.idp_info, context) + + self.validate_request_token_response(resp) + + self.token_gen_id += 1 + + return self.access_token + + def validate_request_token_response(self, resp: Mapping[str, Any]) -> None: + # Validate callback return value. 
+        if not isinstance(resp, dict):
+            raise ValueError("OIDC callback returned invalid result")
+
+        if "access_token" not in resp:
+            raise ValueError("OIDC callback did not return an access_token")
+
+        expected = ["access_token", "refresh_token", "expires_in_seconds"]
+        for key in resp:
+            if key not in expected:
+                raise ValueError(f'Unexpected field in callback result "{key}"')
+
+        self.access_token = resp["access_token"]
+        self.refresh_token = resp.get("refresh_token")
+
+    def principal_step_cmd(self) -> SON[str, Any]:
+        """Get a SASL start command with an optional principal name."""
+        # Send the SASL start with the optional principal name.
+        payload = {}
+
+        principal_name = self.username
+        if principal_name:
+            payload["n"] = principal_name
+
+        return SON(
+            [
+                ("saslStart", 1),
+                ("mechanism", "MONGODB-OIDC"),
+                ("payload", Binary(bson.encode(payload))),
+                ("autoAuthorize", 1),
+            ]
+        )
+
+    def auth_start_cmd(self, use_callback: bool = True) -> Optional[SON[str, Any]]:
+        # TODO: DRIVERS-2672, check for provider_name in self.properties here.
+        if self.idp_info is None:
+            return self.principal_step_cmd()
+
+        token = self.get_current_token(use_callback)
+        if not token:
+            return None
+        bin_payload = Binary(bson.encode({"jwt": token}))
+        return SON(
+            [
+                ("saslStart", 1),
+                ("mechanism", "MONGODB-OIDC"),
+                ("payload", bin_payload),
+            ]
+        )
+
+    def run_command(
+        self, conn: Connection, cmd: MutableMapping[str, Any]
+    ) -> Optional[Mapping[str, Any]]:
+        try:
+            return conn.command("$external", cmd, no_reauth=True)  # type: ignore[call-arg]
+        except OperationFailure:
+            self.access_token = None
+            raise
+
+    def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]:
+        """Handle a reauthenticate from the server."""
+        # First see if we have a newer token on the authenticator.
+        prev_id = conn.oidc_token_gen_id or 0
+        # If we've already changed tokens, make one optimistic attempt.
+        if (prev_id < self.token_gen_id) and self.access_token:
+            try:
+                return self.authenticate(conn)
+            except OperationFailure:
+                pass
+
+        self.access_token = None
+
+        # TODO: DRIVERS-2672, check for provider_name in self.properties here.
+        # If so, we clear the access token and return finish_auth.
+
+        # Next see if the idp info has changed.
+        prev_idp_info = self.idp_info
+        self.idp_info = None
+        cmd = self.principal_step_cmd()
+        resp = self.run_command(conn, cmd)
+        assert resp is not None
+        server_resp: dict = bson.decode(resp["payload"])
+        if "issuer" in server_resp:
+            self.idp_info = server_resp
+
+        # Handle the case of changed idp info.
+        if self.idp_info != prev_idp_info:
+            self.access_token = None
+            self.refresh_token = None
+
+        # If we have a refresh token, try using that.
+        if self.refresh_token:
+            try:
+                return self.finish_auth(resp, conn)
+            except OperationFailure:
+                self.refresh_token = None
+                # If that fails, try again without the refresh token.
+                return self.authenticate(conn)
+
+        # If we don't have a refresh token, just try once.
+ return self.finish_auth(resp, conn) + + def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + ctx = conn.auth_ctx + cmd = None + + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + else: + cmd = self.auth_start_cmd() + assert cmd is not None + resp = self.run_command(conn, cmd) + + assert resp is not None + if resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return None + + server_resp: dict = bson.decode(resp["payload"]) + if "issuer" in server_resp: + self.idp_info = server_resp + + return self.finish_auth(resp, conn) + + def finish_auth( + self, orig_resp: Mapping[str, Any], conn: Connection + ) -> Optional[Mapping[str, Any]]: + conversation_id = orig_resp["conversationId"] + token = self.get_current_token() + conn.oidc_token_gen_id = self.token_gen_id + bin_payload = Binary(bson.encode({"jwt": token})) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", conversation_id), + ("payload", bin_payload), + ] + ) + resp = self.run_command(conn, cmd) + assert resp is not None + if not resp["done"]: + raise OperationFailure("SASL conversation failed to complete.") + return resp + + +def _authenticate_oidc( + credentials: MongoCredential, conn: Connection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return authenticator.reauthenticate(conn) + else: + return authenticator.authenticate(conn) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index cd942a4884..10e77d8b12 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -16,62 +16,77 @@ .. versionadded:: 2.7 """ -import copy +from __future__ import annotations +import copy +from collections.abc import MutableMapping from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + NoReturn, + Optional, + Type, + Union, +) from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (validate_is_mapping, - validate_is_document_type, - validate_ok_for_replace, - validate_ok_for_update) -from pymongo.helpers import _RETRYABLE_ERROR_CODES -from pymongo.collation import validate_collation_or_none -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure) -from pymongo.message import (_INSERT, _UPDATE, _DELETE, - _do_batched_insert, - _randint, - _BulkWriteContext, - _EncryptedBulkWriteContext) +from pymongo import _csot, common +from pymongo.client_session import ClientSession, _validate_session_write_concern +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _EncryptedBulkWriteContext, + _randint, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from pymongo.collection import Collection + from pymongo.pool import Connection + from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline -_DELETE_ALL = 0 -_DELETE_ONE = 1 +_DELETE_ALL: int = 0 +_DELETE_ONE: int = 1 # For backwards compatibility. 
See MongoDB src/mongo/base/error_codes.err -_BAD_VALUE = 2 -_UNKNOWN_ERROR = 8 -_WRITE_CONCERN_ERROR = 64 - -_COMMANDS = ('insert', 'update', 'delete') +_BAD_VALUE: int = 2 +_UNKNOWN_ERROR: int = 8 +_WRITE_CONCERN_ERROR: int = 64 +_COMMANDS: tuple[str, str, str] = ("insert", "update", "delete") -# These string literals are used when we create fake server return -# documents client side. We use unicode literals in python 2.x to -# match the actual return values from the server. -_UOP = u"op" +class _Run: + """Represents a batch of write operations.""" -class _Run(object): - """Represents a batch of write operations. - """ - def __init__(self, op_type): - """Initialize a new Run object. - """ - self.op_type = op_type - self.index_map = [] - self.ops = [] - self.idx_offset = 0 + def __init__(self, op_type: int) -> None: + """Initialize a new Run object.""" + self.op_type: int = op_type + self.index_map: list[int] = [] + self.ops: list[Any] = [] + self.idx_offset: int = 0 - def index(self, idx): + def index(self, idx: int) -> int: """Get the original index of an operation in this run. :Parameters: @@ -79,7 +94,7 @@ def index(self, idx): """ return self.index_map[idx] - def add(self, original_index, operation): + def add(self, original_index: int, operation: Any) -> None: """Add an operation to this Run instance. :Parameters: @@ -91,9 +106,13 @@ def add(self, original_index, operation): self.ops.append(operation) -def _merge_command(run, full_result, offset, result): - """Merge a write command result into the full bulk result. - """ +def _merge_command( + run: _Run, + full_result: MutableMapping[str, Any], + offset: int, + result: Mapping[str, Any], +) -> None: + """Merge a write command result into the full bulk result.""" affected = result.get("n", 0) if run.op_type == _INSERT: @@ -110,7 +129,7 @@ def _merge_command(run, full_result, offset, result): doc["index"] = run.index(doc["index"] + offset) full_result["upserted"].extend(upserted) full_result["nUpserted"] += n_upserted - full_result["nMatched"] += (affected - n_upserted) + full_result["nMatched"] += affected - n_upserted else: full_result["nMatched"] += affected full_result["nModified"] += result["nModified"] @@ -123,108 +142,142 @@ def _merge_command(run, full_result, offset, result): idx = doc["index"] + offset replacement["index"] = run.index(idx) # Add the failed operation to the error document. - replacement[_UOP] = run.ops[idx] + replacement["op"] = run.ops[idx] full_result["writeErrors"].append(replacement) - wc_error = result.get("writeConcernError") - if wc_error: - full_result["writeConcernErrors"].append(wc_error) + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) -def _raise_bulk_write_error(full_result): - """Raise a BulkWriteError from the full bulk api result. - """ +def _raise_bulk_write_error(full_result: _DocumentOut) -> NoReturn: + """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: - full_result["writeErrors"].sort( - key=lambda error: error["index"]) + full_result["writeErrors"].sort(key=lambda error: error["index"]) raise BulkWriteError(full_result) -class _Bulk(object): - """The private guts of the bulk write API. - """ - def __init__(self, collection, ordered, bypass_document_validation): - """Initialize a _Bulk instance. 
- """ +class _Bulk: + """The private guts of the bulk write API.""" + + def __init__( + self, + collection: Collection[_DocumentType], + ordered: bool, + bypass_document_validation: bool, + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: + """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict)) + unicode_decode_error_handler="replace", document_class=dict + ) + ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.comment: Optional[str] = comment self.ordered = ordered - self.ops = [] + self.ops: list[tuple[int, Mapping[str, Any]]] = [] self.executed = False self.bypass_doc_val = bypass_document_validation self.uses_collation = False self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False self.is_retryable = True self.retrying = False self.started_retryable_write = False # Extra state so that we know where to pick up on a retry attempt. self.current_run = None + self.next_run = None @property - def bulk_ctx_class(self): + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: encrypter = self.collection.database.client._encrypter if encrypter and not encrypter._bypass_auto_encryption: return _EncryptedBulkWriteContext else: return _BulkWriteContext - def add_insert(self, document): - """Add an insert document to the list of ops. - """ + def add_insert(self, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" validate_is_document_type("document", document) # Generate ObjectId client side. - if not (isinstance(document, RawBSONDocument) or '_id' in document): - document['_id'] = ObjectId() + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() self.ops.append((_INSERT, document)) - def add_update(self, selector, update, multi=False, upsert=False, - collation=None, array_filters=None): - """Create an update document and add it to the list of ops. - """ + def add_update( + self, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool = False, + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, SON[str, Any], None] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd = SON([('q', selector), ('u', update), - ('multi', multi), ('upsert', upsert)]) - collation = validate_collation_or_none(collation) + cmd: dict[str, Any] = dict( # noqa: C406 + [("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)] + ) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if array_filters is not None: self.uses_array_filters = True - cmd['arrayFilters'] = array_filters + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append((_UPDATE, cmd)) - def add_replace(self, selector, replacement, upsert=False, - collation=None): - """Create a replace document and add it to the list of ops. 
- """ + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, SON[str, Any], None] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = SON([('q', selector), ('u', replacement), - ('multi', False), ('upsert', upsert)]) - collation = validate_collation_or_none(collation) + cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint self.ops.append((_UPDATE, cmd)) - def add_delete(self, selector, limit, collation=None): - """Create a delete document and add it to the list of ops. - """ - cmd = SON([('q', selector), ('limit', limit)]) - collation = validate_collation_or_none(collation) + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, SON[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd = SON([("q", selector), ("limit", limit)]) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. self.is_retryable = False self.ops.append((_DELETE, cmd)) - def gen_ordered(self): + def gen_ordered(self) -> Iterator[Optional[_Run]]: """Generate batches of operations, batched by type of operation, in the order **provided**. """ @@ -238,7 +291,7 @@ def gen_ordered(self): run.add(idx, operation) yield run - def gen_unordered(self): + def gen_unordered(self) -> Iterator[_Run]: """Generate batches of operations, batched by type of operation, in arbitrary order. """ @@ -250,78 +303,116 @@ def gen_unordered(self): if run.ops: yield run - def _execute_command(self, generator, write_concern, session, - sock_info, op_id, retryable, full_result): - if sock_info.max_wire_version < 5 and self.uses_collation: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use a collation.') - if sock_info.max_wire_version < 6 and self.uses_array_filters: - raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use arrayFilters.') - + def _execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners if not self.current_run: self.current_run = next(generator) + self.next_run = None run = self.current_run - # sock_info.command validates the session, but we use - # sock_info.write_command. 
- sock_info.validate_session(client, session) + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(client, session) + last_run = False + while run: - cmd = SON([(_COMMANDS[run.op_type], self.collection.name), - ('ordered', self.ordered)]) - if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document - if self.bypass_doc_val and sock_info.max_wire_version >= 4: - cmd['bypassDocumentValidation'] = True + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd, sock_info, op_id, listeners, session, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + conn, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + + cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) + if self.comment: + cmd["comment"] = self.comment + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val: + cmd["bypassDocumentValidation"] = True + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let if session: # Start a new retryable write unless one was already # started for this command. if retryable and not self.started_retryable_write: session._start_retryable_write() self.started_retryable_write = True - session._apply_to(cmd, retryable, ReadPreference.PRIMARY) - sock_info.send_cluster_time(cmd, session, client) + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(client, cmd) ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible in one command. - result, to_send = bwc.execute(ops, client) - - # Retryable writeConcernErrors halt the execution of this run. - wce = result.get('writeConcernError', {}) - if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: - # Synthesize the full bulk result without modifying the - # current one because this write operation may be retried. - full = copy.deepcopy(full_result) - _merge_command(run, full, run.idx_offset, result) - _raise_bulk_write_error(full) - - _merge_command(run, full_result, run.idx_offset, result) - # We're no longer in a retry once a command succeeds. - self.retrying = False - self.started_retryable_write = False - - if self.ordered and "writeErrors" in result: - break + if write_concern.acknowledged: + result, to_send = bwc.execute(cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. 
+ self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = bwc.execute_unack(cmd, ops, client) + run.idx_offset += len(to_send) # We're supposed to continue if errors are # at the write concern level (e.g. wtimeout) - if self.ordered and full_result['writeErrors']: + if self.ordered and full_result["writeErrors"]: break # Reset our state - self.current_run = run = next(generator, None) - - def execute_command(self, generator, write_concern, session): - """Execute using write commands. - """ + self.current_run = run = self.next_run + + def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + ) -> dict[str, Any]: + """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. full_result = { "writeErrors": [], @@ -335,41 +426,28 @@ def execute_command(self, generator, write_concern, session): } op_id = _randint() - def retryable_bulk(session, sock_info, retryable): + def retryable_bulk( + session: Optional[ClientSession], conn: Connection, retryable: bool + ) -> None: self._execute_command( - generator, write_concern, session, sock_info, op_id, - retryable, full_result) + generator, + write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) client = self.collection.database.client - with client._tmp_session(session) as s: - client._retry_with_session( - self.is_retryable, retryable_bulk, s, self) + client._retryable_write(self.is_retryable, retryable_bulk, session, bulk=self) if full_result["writeErrors"] or full_result["writeConcernErrors"]: _raise_bulk_write_error(full_result) return full_result - def execute_insert_no_results(self, sock_info, run, op_id, acknowledged): - """Execute insert, returning no results. - """ - command = SON([('insert', self.collection.name), - ('ordered', self.ordered)]) - concern = {'w': int(self.ordered)} - command['writeConcern'] = concern - if self.bypass_doc_val and sock_info.max_wire_version >= 4: - command['bypassDocumentValidation'] = True - db = self.collection.database - bwc = _BulkWriteContext( - db.name, command, sock_info, op_id, db.client._event_listeners, - None, _INSERT, self.collection.codec_options) - # Legacy batched OP_INSERT. - _do_batched_insert( - self.collection.full_name, run.ops, True, acknowledged, concern, - not self.ordered, self.collection.codec_options, bwc) - - def execute_op_msg_no_results(self, sock_info, generator): - """Execute write commands with OP_MSG and w=0 writeConcern, unordered. 
- """ + def execute_op_msg_no_results(self, conn: Connection, generator: Iterator[Any]) -> None: + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -380,23 +458,40 @@ def execute_op_msg_no_results(self, sock_info, generator): run = self.current_run while run: - cmd = SON([(_COMMANDS[run.op_type], self.collection.name), - ('ordered', False), - ('writeConcern', {'w': 0})]) + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd, sock_info, op_id, listeners, None, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + conn, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): + cmd = SON( + [ + (cmd_name, self.collection.name), + ("ordered", False), + ("writeConcern", {"w": 0}), + ] + ) + conn.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible. - to_send = bwc.execute_unack(ops, client) + to_send = bwc.execute_unack(cmd, ops, client) run.idx_offset += len(to_send) self.current_run = run = next(generator, None) - def execute_command_no_results(self, sock_info, generator): - """Execute write commands with OP_MSG and w=0 WriteConcern, ordered. - """ + def execute_command_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" full_result = { "writeErrors": [], "writeConcernErrors": [], @@ -410,90 +505,59 @@ def execute_command_no_results(self, sock_info, generator): # Ordered bulk writes have to be acknowledged so that we stop # processing at the first error, even when the application # specified unacknowledged writeConcern. - write_concern = WriteConcern() + initial_write_concern = WriteConcern() op_id = _randint() try: self._execute_command( - generator, write_concern, None, - sock_info, op_id, False, full_result) + generator, + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + write_concern, + ) except OperationFailure: pass - def execute_no_results(self, sock_info, generator): - """Execute all operations, returning no results (w=0). - """ + def execute_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute all operations, returning no results (w=0).""" if self.uses_collation: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if unack and self.uses_hint_update and conn.max_wire_version < 8: raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) # Cannot have both unacknowledged writes and bypass document validation. 
- if self.bypass_doc_val and sock_info.max_wire_version >= 4: - raise OperationFailure("Cannot set bypass_document_validation with" - " unacknowledged write concern") - - # OP_MSG - if sock_info.max_wire_version > 5: - if self.ordered: - return self.execute_command_no_results(sock_info, generator) - return self.execute_op_msg_no_results(sock_info, generator) - - coll = self.collection - # If ordered is True we have to send GLE or use write - # commands so we can abort on the first error. - write_concern = WriteConcern(w=int(self.ordered)) - op_id = _randint() + if self.bypass_doc_val: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) - next_run = next(generator) - while next_run: - # An ordered bulk write needs to send acknowledged writes to short - # circuit the next run. However, the final message on the final - # run can be unacknowledged. - run = next_run - next_run = next(generator, None) - needs_ack = self.ordered and next_run is not None - try: - if run.op_type == _INSERT: - self.execute_insert_no_results( - sock_info, run, op_id, needs_ack) - elif run.op_type == _UPDATE: - for operation in run.ops: - doc = operation['u'] - check_keys = True - if doc and next(iter(doc)).startswith('$'): - check_keys = False - coll._update( - sock_info, - operation['q'], - doc, - operation['upsert'], - check_keys, - operation['multi'], - write_concern=write_concern, - op_id=op_id, - ordered=self.ordered, - bypass_doc_val=self.bypass_doc_val) - else: - for operation in run.ops: - coll._delete(sock_info, - operation['q'], - not operation['limit'], - write_concern, - op_id, - self.ordered) - except OperationFailure: - if self.ordered: - break - - def execute(self, write_concern, session): - """Execute operations. - """ + if self.ordered: + return self.execute_command_no_results(conn, generator, write_concern) + return self.execute_op_msg_no_results(conn, generator) + + def execute(self, write_concern: WriteConcern, session: Optional[ClientSession]) -> Any: + """Execute operations.""" if not self.ops: - raise InvalidOperation('No operations to execute') + raise InvalidOperation("No operations to execute") if self.executed: - raise InvalidOperation('Bulk operations can ' - 'only be executed once.') + raise InvalidOperation("Bulk operations can only be executed once.") self.executed = True write_concern = write_concern or self.collection.write_concern session = _validate_session_write_concern(session, write_concern) @@ -505,187 +569,8 @@ def execute(self, write_concern, session): client = self.collection.database.client if not write_concern.acknowledged: - with client._socket_for_writes(session) as sock_info: - self.execute_no_results(sock_info, generator) + with client._conn_for_writes(session) as connection: + self.execute_no_results(connection, generator, write_concern) + return None else: return self.execute_command(generator, write_concern, session) - - -class BulkUpsertOperation(object): - """An interface for adding upsert operations. - """ - - __slots__ = ('__selector', '__bulk', '__collation') - - def __init__(self, selector, bulk, collation): - self.__selector = selector - self.__bulk = bulk - self.__collation = collation - - def update_one(self, update): - """Update one document matching the selector. 
- - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=False, upsert=True, - collation=self.__collation) - - def update(self, update): - """Update all documents matching the selector. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=True, upsert=True, - collation=self.__collation) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement, upsert=True, - collation=self.__collation) - - -class BulkWriteOperation(object): - """An interface for adding update or remove operations. - """ - - __slots__ = ('__selector', '__bulk', '__collation') - - def __init__(self, selector, bulk, collation): - self.__selector = selector - self.__bulk = bulk - self.__collation = collation - - def update_one(self, update): - """Update one document matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=False, - collation=self.__collation) - - def update(self, update): - """Update all documents matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=True, - collation=self.__collation) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement, - collation=self.__collation) - - def remove_one(self): - """Remove a single document matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ONE, - collation=self.__collation) - - def remove(self): - """Remove all documents matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ALL, - collation=self.__collation) - - def upsert(self): - """Specify that all chained update operations should be - upserts. - - :Returns: - - A :class:`BulkUpsertOperation` instance, used to add - update operations to this bulk operation. - """ - return BulkUpsertOperation(self.__selector, self.__bulk, - self.__collation) - - -class BulkOperationBuilder(object): - """**DEPRECATED**: An interface for executing a batch of write operations. - """ - - __slots__ = '__bulk' - - def __init__(self, collection, ordered=True, - bypass_document_validation=False): - """**DEPRECATED**: Initialize a new BulkOperationBuilder instance. - - :Parameters: - - `collection`: A :class:`~pymongo.collection.Collection` instance. - - `ordered` (optional): If ``True`` all operations will be executed - serially, in the order provided, and the entire execution will - abort on the first error. If ``False`` operations will be executed - in arbitrary order (possibly in parallel on the server), reporting - any errors that occurred after attempting all operations. Defaults - to ``True``. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. 
versionchanged:: 3.2 - Added bypass_document_validation support - """ - self.__bulk = _Bulk(collection, ordered, bypass_document_validation) - - def find(self, selector, collation=None): - """Specify selection criteria for bulk operations. - - :Parameters: - - `selector` (dict): the selection criteria for update - and remove operations. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. - - :Returns: - - A :class:`BulkWriteOperation` instance, used to add - update and remove operations to this bulk operation. - - .. versionchanged:: 3.4 - Added the `collation` option. - - """ - validate_is_mapping("selector", selector) - return BulkWriteOperation(selector, self.__bulk, collation) - - def insert(self, document): - """Insert a single document. - - :Parameters: - - `document` (dict): the document to insert - - .. seealso:: :ref:`writes-and-ids` - """ - self.__bulk.add_insert(document) - - def execute(self, write_concern=None): - """Execute all provided operations. - - :Parameters: - - write_concern (optional): the write concern for this bulk - execution. - """ - if write_concern is not None: - write_concern = WriteConcern(**write_concern) - return self.__bulk.execute(write_concern, session=None) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 00b56e4869..75cd169790 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -13,70 +13,129 @@ # permissions and limitations under the License. """Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations import copy +from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union -from bson import _bson_to_dict +from bson import CodecOptions, _bson_to_dict from bson.raw_bson import RawBSONDocument - -from pymongo import common -from pymongo.aggregation import (_CollectionAggregationCommand, - _DatabaseAggregationCommand) +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor -from pymongo.errors import (ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError) - +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. -_NON_RESUMABLE_GETMORE_ERRORS = frozenset([ - 11601, # Interrupted - 136, # CappedPositionLost - 237, # CursorKilled - None, # No error code was returned. 
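For reference, the classes deleted from ``pymongo/bulk.py`` above (``BulkUpsertOperation``, ``BulkWriteOperation``, ``BulkOperationBuilder``) formed the long-deprecated fluent bulk API. A rough equivalent under the surviving ``bulk_write`` API, with illustrative selectors:

.. code-block:: python

    from pymongo import DeleteMany, MongoClient, UpdateOne

    client = MongoClient()
    coll = client.test.things

    # Previously, with the fluent API removed above:
    #   bulk = coll.initialize_ordered_bulk_op()
    #   bulk.find({"sku": "abc"}).upsert().update_one({"$inc": {"qty": 5}})
    #   bulk.find({"stale": True}).remove()
    #   bulk.execute()
    coll.bulk_write(
        [
            UpdateOne({"sku": "abc"}, {"$inc": {"qty": 5}}, upsert=True),
            DeleteMany({"stale": True}),
        ],
        ordered=True,
    )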
-]) - - -class ChangeStream(object): +_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + ] +) + + +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.database import Database + from pymongo.mongo_client import MongoClient + from pymongo.pool import Connection + + +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + +class ChangeStream(Generic[_DocumentType]): """The internal abstract base class for change stream cursors. - Should not be called directly by application developers. Use + Should not be called directly by application developers. Use :meth:`pymongo.collection.Collection.watch`, :meth:`pymongo.database.Database.watch`, or :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. """ - def __init__(self, target, pipeline, full_document, resume_after, - max_await_time_ms, batch_size, collation, - start_at_operation_time, session, start_after): + + def __init__( + self, + target: Union[ + MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType] + ], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional[ClientSession], + start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> None: if pipeline is None: pipeline = [] - elif not isinstance(pipeline, list): - raise TypeError("pipeline must be a list") - - common.validate_string_or_none('full_document', full_document) + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) validate_collation_or_none(collation) common.validate_non_negative_integer_or_none("batchSize", batch_size) self._decode_custom = False - self._orig_codec_options = target.codec_options + self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options if target.codec_options.type_registry._decoder_map: self._decode_custom = True # Keep the type registry so that we support encoding custom types # in the pipeline. 
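``_resumable`` above encodes the change streams spec's resumability rules: network errors and ``CursorNotFound`` always resume; on wire version 9+ only errors carrying the ``ResumableChangeStreamError`` label resume; older servers fall back to the code allowlist. From the application side this machinery is invisible; a typical consumption loop (names illustrative):

.. code-block:: python

    from pymongo import MongoClient

    client = MongoClient()
    coll = client.test.things

    with coll.watch([{"$match": {"operationType": "insert"}}]) as stream:
        while stream.alive:
            change = stream.try_next()
            # try_next returns None when no change has arrived yet; after a
            # resumable error the stream recreates its cursor transparently.
            if change is None:
                continue
            print(change["fullDocument"])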
- self._target = target.with_options( - codec_options=target.codec_options.with_options( - document_class=RawBSONDocument)) + self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) else: self._target = target self._pipeline = copy.deepcopy(pipeline) self._full_document = full_document + self._full_document_before_change = full_document_before_change self._uses_start_after = start_after is not None self._uses_resume_after = resume_after is not None self._resume_token = copy.deepcopy(start_after or resume_after) @@ -85,39 +144,50 @@ def __init__(self, target, pipeline, full_document, resume_after, self._collation = collation self._start_at_operation_time = start_at_operation_time self._session = session - + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events # Initialize cursor. self._cursor = self._create_cursor() @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_AggregationCommand]: """The aggregation command class to be used.""" raise NotImplementedError @property - def _client(self): + def _client(self) -> MongoClient: """The client against which the aggregation commands for - this ChangeStream will be run. """ + this ChangeStream will be run. + """ raise NotImplementedError - def _change_stream_options(self): + def _change_stream_options(self) -> dict[str, Any]: """Return the options dict for the $changeStream pipeline stage.""" - options = {} + options: dict[str, Any] = {} if self._full_document is not None: - options['fullDocument'] = self._full_document + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change resume_token = self.resume_token if resume_token is not None: if self._uses_start_after: - options['startAfter'] = resume_token - if self._uses_resume_after: - options['resumeAfter'] = resume_token + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token if self._start_at_operation_time is not None: - options['startAtOperationTime'] = self._start_at_operation_time + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + return options - def _command_options(self): + def _command_options(self) -> dict[str, Any]: """Return the options dict for the aggregation command.""" options = {} if self._max_await_time_ms is not None: @@ -126,47 +196,62 @@ def _command_options(self): options["batchSize"] = self._batch_size return options - def _aggregation_pipeline(self): + def _aggregation_pipeline(self) -> list[dict[str, Any]]: """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline = [{'$changeStream': options}] + full_pipeline: list = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, session, server, sock_info, slave_ok): - """Callback that caches the startAtOperationTime from a changeStream - aggregate command response containing an empty batch of change - documents. 
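``_change_stream_options`` above sends ``startAfter`` when the stream was created with one, and otherwise ``resumeAfter``; the distinction matters because ``startAfter`` can resume past an invalidate event (for example, a collection drop) where ``resumeAfter`` cannot. A sketch of persisting and replaying a token (durable storage is elided; names illustrative):

.. code-block:: python

    from pymongo import MongoClient

    client = MongoClient()
    coll = client.test.things

    with coll.watch() as stream:
        stream.next()  # Block until the first change arrives.
        token = stream.resume_token  # Persist this somewhere durable.

    # Later, replay from the saved position.
    with coll.watch(start_after=token) as stream:
        for change in stream:
            print(change["operationType"])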
+ def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. This is implemented as a callback because we need access to the wire version in order to determine whether to cache this value. """ - if not result['cursor']['firstBatch']: - if (self._start_at_operation_time is None and - self.resume_token is None and - sock_info.max_wire_version >= 7): - self._start_at_operation_time = result["operationTime"] - - def _run_aggregation_cmd(self, session, explicit_session): + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + def _run_aggregation_cmd( + self, session: Optional[ClientSession], explicit_session: bool + ) -> CommandCursor: """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ cmd = self._aggregation_command_class( - self._target, CommandCursor, self._aggregation_pipeline(), - self._command_options(), explicit_session, - result_processor=self._process_result) - + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + explicit_session, + result_processor=self._process_result, + comment=self._comment, + ) return self._client._retryable_read( - cmd.get_cursor, self._target._read_preference_for(session), - session) + cmd.get_cursor, self._target._read_preference_for(session), session + ) - def _create_cursor(self): + def _create_cursor(self) -> CommandCursor: with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd( - session=s, - explicit_session=self._session is not None) + return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) - def _resume(self): + def _resume(self) -> None: """Reestablish this change stream after a resumable error.""" try: self._cursor.close() @@ -174,15 +259,16 @@ def _resume(self): pass self._cursor = self._create_cursor() - def close(self): + def close(self) -> None: """Close this ChangeStream.""" + self._closed = True self._cursor.close() - def __iter__(self): + def __iter__(self) -> ChangeStream[_DocumentType]: return self @property - def resume_token(self): + def resume_token(self) -> Optional[Mapping[str, Any]]: """The cached resume token that will be used to resume after the most recently returned change. @@ -190,7 +276,8 @@ def resume_token(self): """ return copy.deepcopy(self._resume_token) - def next(self): + @_csot.apply + def next(self) -> _DocumentType: """Advance the cursor. This method blocks until the next change document is returned or an @@ -232,7 +319,7 @@ def next(self): __next__ = next @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -240,9 +327,10 @@ def alive(self): .. 
versionadded:: 3.8 """ - return self._cursor.alive + return not self._closed - def try_next(self): + @_csot.apply + def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. This method returns the next change document without waiting @@ -274,19 +362,31 @@ def try_next(self): .. versionadded:: 3.8 """ + if not self._closed and not self._cursor.alive: + self._resume() + # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: - change = self._cursor._try_next(True) - except ConnectionFailure: - self._resume() - change = self._cursor._try_next(False) - except OperationFailure as exc: - if (exc.code in _NON_RESUMABLE_GETMORE_ERRORS or - exc.has_error_label("NonResumableChangeStreamError")): - raise - self._resume() - change = self._cursor._try_next(False) + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + except Exception: + self.close() + raise + + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True # If no changes are available. if change is None: @@ -301,17 +401,16 @@ def try_next(self): # Else, changes are available. try: - resume_token = change['_id'] + resume_token = change["_id"] except KeyError: self.close() raise InvalidOperation( - "Cannot provide resume functionality when the resume " - "token is missing.") + "Cannot provide resume functionality when the resume token is missing." + ) from None # If this is the last change document from the current batch, cache the # postBatchResumeToken. - if (not self._cursor._has_next() and - self._cursor._post_batch_resume_token): + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: resume_token = self._cursor._post_batch_resume_token # Hereafter, don't use startAfter; instead use resumeAfter. @@ -326,14 +425,14 @@ def try_next(self): return _bson_to_dict(change.raw, self._orig_codec_options) return change - def __enter__(self): + def __enter__(self) -> ChangeStream[_DocumentType]: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class CollectionChangeStream(ChangeStream): +class CollectionChangeStream(ChangeStream[_DocumentType]): """A change stream that watches changes on a single collection. Should not be called directly by application developers. Use @@ -341,16 +440,19 @@ class CollectionChangeStream(ChangeStream): .. versionadded:: 3.7 """ + + _target: Collection[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: return _CollectionAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient[_DocumentType]: return self._target.database.client -class DatabaseChangeStream(ChangeStream): +class DatabaseChangeStream(ChangeStream[_DocumentType]): """A change stream that watches changes on all collections in a database. Should not be called directly by application developers. Use @@ -358,16 +460,19 @@ class DatabaseChangeStream(ChangeStream): .. 
versionadded:: 3.7 """ + + _target: Database[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: return _DatabaseAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient[_DocumentType]: return self._target.client -class ClusterChangeStream(DatabaseChangeStream): +class ClusterChangeStream(DatabaseChangeStream[_DocumentType]): """A change stream that watches changes on all collections in the cluster. Should not be called directly by application developers. Use @@ -375,7 +480,8 @@ class ClusterChangeStream(DatabaseChangeStream): .. versionadded:: 3.7 """ - def _change_stream_options(self): - options = super(ClusterChangeStream, self)._change_stream_options() + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() options["allChangesForCluster"] = True return options diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 7bb68a9105..d5f9cfcccd 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -13,237 +13,319 @@ # permissions and limitations under the License. """Tools to parse mongo client options.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, cast from bson.codec_options import _parse_codec_options -from pymongo.auth import _build_credentials_tuple -from pymongo.common import validate_boolean from pymongo import common +from pymongo.auth import MongoCredential, _build_credentials_tuple +from pymongo.common import validate_boolean from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError -from pymongo.monitoring import _EventListeners +from pymongo.monitoring import _EventListener, _EventListeners from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name) +from pymongo.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.encryption import AutoEncryptionOpts + from pymongo.pyopenssl_context import SSLContext + from pymongo.topology_description import _ServerSelector -def _parse_credentials(username, password, database, options): + +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: """Parse authentication credentials.""" - mechanism = options.get('authmechanism', 'DEFAULT' if username else None) - source = options.get('authsource') + mechanism = options.get("authmechanism", "DEFAULT" if username else None) + source = options.get("authsource") if username or mechanism: - return _build_credentials_tuple( - mechanism, source, username, password, options, database) + return _build_credentials_tuple(mechanism, source, username, password, options, database) return None -def _parse_read_preference(options): +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: """Parse read preference options.""" - if 'read_preference' in options: - return options['read_preference'] + if "read_preference" in options: + return options["read_preference"] - name = options.get('readpreference', 'primary') + name = 
options.get("readpreference", "primary") mode = read_pref_mode_from_name(name) - tags = options.get('readpreferencetags') - max_staleness = options.get('maxstalenessseconds', -1) + tags = options.get("readpreferencetags") + max_staleness = options.get("maxstalenessseconds", -1) return make_read_preference(mode, tags, max_staleness) -def _parse_write_concern(options): +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: """Parse write concern options.""" - concern = options.get('w') - wtimeout = options.get('wtimeoutms') - j = options.get('journal') - fsync = options.get('fsync') + concern = options.get("w") + wtimeout = options.get("wtimeoutms") + j = options.get("journal") + fsync = options.get("fsync") return WriteConcern(concern, wtimeout, j, fsync) -def _parse_read_concern(options): +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: """Parse read concern options.""" - concern = options.get('readconcernlevel') + concern = options.get("readconcernlevel") return ReadConcern(concern) -def _parse_ssl_options(options): +def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext], bool]: """Parse ssl options.""" - use_ssl = options.get('ssl') - if use_ssl is not None: - validate_boolean('ssl', use_ssl) - - certfile = options.get('ssl_certfile') - keyfile = options.get('ssl_keyfile') - passphrase = options.get('ssl_pem_passphrase') - ca_certs = options.get('ssl_ca_certs') - cert_reqs = options.get('ssl_cert_reqs') - match_hostname = options.get('ssl_match_hostname', True) - crlfile = options.get('ssl_crlfile') - - ssl_kwarg_keys = [k for k in options - if k.startswith('ssl_') and options[k]] - if use_ssl == False and ssl_kwarg_keys: - raise ConfigurationError("ssl has not been enabled but the " - "following ssl parameters have been set: " - "%s. Please set `ssl=True` or remove." - % ', '.join(ssl_kwarg_keys)) - - if ssl_kwarg_keys and use_ssl is None: - # ssl options imply ssl = True - use_ssl = True - - if use_ssl is True: + use_tls = options.get("tls") + if use_tls is not None: + validate_boolean("tls", use_tls) + + certfile = options.get("tlscertificatekeyfile") + passphrase = options.get("tlscertificatekeyfilepassword") + ca_certs = options.get("tlscafile") + crlfile = options.get("tlscrlfile") + allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) + allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) + disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) + + enabled_tls_opts = [] + for opt in ( + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "tlscafile", + "tlscrlfile", + ): + # Any non-null value of these options implies tls=True. + if opt in options and options[opt]: + enabled_tls_opts.append(opt) + for opt in ( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", + ): + # A value of False for these options implies tls=True. + if opt in options and not options[opt]: + enabled_tls_opts.append(opt) + + if enabled_tls_opts: + if use_tls is None: + # Implicitly enable TLS when one of the tls* options is set. + use_tls = True + elif not use_tls: + # Error since tls is explicitly disabled but a tls option is set. + raise ConfigurationError( + "TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." 
% ", ".join(enabled_tls_opts) + ) + + if use_tls: ctx = get_ssl_context( certfile, - keyfile, passphrase, ca_certs, - cert_reqs, crlfile, - match_hostname) - return ctx, match_hostname - return None, match_hostname + allow_invalid_certificates, + allow_invalid_hostnames, + disable_ocsp_endpoint_check, + ) + return ctx, allow_invalid_hostnames + return None, allow_invalid_hostnames -def _parse_pool_options(options): +def _parse_pool_options( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> PoolOptions: """Parse connection pool options.""" - max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) - min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) - max_idle_time_seconds = options.get( - 'maxidletimems', common.MAX_IDLE_TIME_SEC) + credentials = _parse_credentials(username, password, database, options) + max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) + min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) + max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) if max_pool_size is not None and min_pool_size > max_pool_size: raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") - connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT) - socket_keepalive = options.get('socketkeepalive', True) - socket_timeout = options.get('sockettimeoutms') - wait_queue_timeout = options.get( - 'waitqueuetimeoutms', common.WAIT_QUEUE_TIMEOUT) - wait_queue_multiple = options.get('waitqueuemultiple') - event_listeners = options.get('event_listeners') - appname = options.get('appname') - driver = options.get('driver') + connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) + socket_timeout = options.get("sockettimeoutms") + wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) + event_listeners = cast(Optional[Sequence[_EventListener]], options.get("event_listeners")) + appname = options.get("appname") + driver = options.get("driver") + server_api = options.get("server_api") compression_settings = CompressionSettings( - options.get('compressors', []), - options.get('zlibcompressionlevel', -1)) - ssl_context, ssl_match_hostname = _parse_ssl_options(options) - return PoolOptions(max_pool_size, - min_pool_size, - max_idle_time_seconds, - connect_timeout, socket_timeout, - wait_queue_timeout, wait_queue_multiple, - ssl_context, ssl_match_hostname, socket_keepalive, - _EventListeners(event_listeners), - appname, - driver, - compression_settings) - - -class ClientOptions(object): - - """ClientOptions""" - - def __init__(self, username, password, database, options): + options.get("compressors", []), options.get("zlibcompressionlevel", -1) + ) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) + load_balanced = options.get("loadbalanced") + max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) + return PoolOptions( + max_pool_size, + min_pool_size, + max_idle_time_seconds, + connect_timeout, + socket_timeout, + wait_queue_timeout, + ssl_context, + tls_allow_invalid_hostnames, + _EventListeners(event_listeners), + appname, + driver, + compression_settings, + max_connecting=max_connecting, + server_api=server_api, + load_balanced=load_balanced, + credentials=credentials, + ) + + +class ClientOptions: + """Read only configuration options for a MongoClient. + + Should not be instantiated directly by application developers. 
Access
+    a client's options via :attr:`pymongo.mongo_client.MongoClient.options`
+    instead.
+    """
+
+    def __init__(
+        self, username: str, password: str, database: Optional[str], options: Mapping[str, Any]
+    ):
         self.__options = options
-        self.__codec_options = _parse_codec_options(options)
-        self.__credentials = _parse_credentials(
-            username, password, database, options)
-        self.__local_threshold_ms = options.get(
-            'localthresholdms', common.LOCAL_THRESHOLD_MS)
+        self.__codec_options = _parse_codec_options(options)
+        self.__direct_connection = options.get("directconnection")
+        self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS)
         # self.__server_selection_timeout is in seconds. Must use full name for
         # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests.
         self.__server_selection_timeout = options.get(
-            'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT)
-        self.__pool_options = _parse_pool_options(options)
+            "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT
+        )
+        self.__pool_options = _parse_pool_options(username, password, database, options)
         self.__read_preference = _parse_read_preference(options)
-        self.__replica_set_name = options.get('replicaset')
+        self.__replica_set_name = options.get("replicaset")
         self.__write_concern = _parse_write_concern(options)
         self.__read_concern = _parse_read_concern(options)
-        self.__connect = options.get('connect')
-        self.__heartbeat_frequency = options.get(
-            'heartbeatfrequencyms', common.HEARTBEAT_FREQUENCY)
-        self.__retry_writes = options.get('retrywrites', common.RETRY_WRITES)
-        self.__retry_reads = options.get('retryreads', common.RETRY_READS)
-        self.__server_selector = options.get(
-            'server_selector', any_server_selector)
-        self.__auto_encryption_opts = options.get('auto_encryption_opts')
+        self.__connect = options.get("connect")
+        self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY)
+        self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES)
+        self.__retry_reads = options.get("retryreads", common.RETRY_READS)
+        self.__server_selector = options.get("server_selector", any_server_selector)
+        self.__auto_encryption_opts = options.get("auto_encryption_opts")
+        self.__load_balanced = options.get("loadbalanced")
+        self.__timeout = options.get("timeoutms")
+        self.__server_monitoring_mode = options.get(
+            "servermonitoringmode", common.SERVER_MONITORING_MODE
+        )

     @property
-    def _options(self):
+    def _options(self) -> Mapping[str, Any]:
         """The original options used to create this ClientOptions."""
         return self.__options

     @property
-    def connect(self):
+    def connect(self) -> Optional[bool]:
         """Whether to begin discovering a MongoDB topology automatically."""
         return self.__connect

     @property
-    def codec_options(self):
+    def codec_options(self) -> CodecOptions:
         """A :class:`~bson.codec_options.CodecOptions` instance."""
         return self.__codec_options

     @property
-    def credentials(self):
-        """A :class:`~pymongo.auth.MongoCredentials` instance or None."""
-        return self.__credentials
+    def direct_connection(self) -> Optional[bool]:
+        """Whether to connect to the deployment in 'Single' topology."""
+        return self.__direct_connection

     @property
-    def local_threshold_ms(self):
+    def local_threshold_ms(self) -> int:
         """The local threshold for this instance."""
         return self.__local_threshold_ms

     @property
-    def server_selection_timeout(self):
+    def server_selection_timeout(self) -> int:
         """The server selection timeout for this instance in seconds."""
         return self.__server_selection_timeout

     @property
-    def 
server_selector(self): + def server_selector(self) -> _ServerSelector: return self.__server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: """The monitoring frequency in seconds.""" return self.__heartbeat_frequency @property - def pool_options(self): + def pool_options(self) -> PoolOptions: """A :class:`~pymongo.pool.PoolOptions` instance.""" return self.__pool_options @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """A read preference instance.""" return self.__read_preference @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self.__replica_set_name @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """A :class:`~pymongo.write_concern.WriteConcern` instance.""" return self.__write_concern @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern @property - def retry_writes(self): + def timeout(self) -> Optional[float]: + """The configured timeoutMS converted to seconds, or None. + + .. versionadded:: 4.2 + """ + return self.__timeout + + @property + def retry_writes(self) -> bool: """If this instance should retry supported write operations.""" return self.__retry_writes @property - def retry_reads(self): + def retry_reads(self) -> bool: """If this instance should retry supported read operations.""" return self.__retry_reads @property - def auto_encryption_opts(self): + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" return self.__auto_encryption_opts + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self.__load_balanced + + @property + def event_listeners(self) -> list[_EventListeners]: + """The event listeners registered for this client. + + See :mod:`~pymongo.monitoring` for details. + + .. versionadded:: 4.0 + """ + assert self.__pool_options._event_listeners is not None + return self.__pool_options._event_listeners.event_listeners() + + @property + def server_monitoring_mode(self) -> str: + """The configured serverMonitoringMode option. + + .. versionadded:: 4.5 + """ + return self.__server_monitoring_mode diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 0c0e7c436d..0aac770111 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -14,8 +14,6 @@ """Logical sessions for ordering sequential operations. -Requires MongoDB 3.6. - .. versionadded:: 3.6 Causally Consistent Reads @@ -25,25 +23,26 @@ with client.start_session(causal_consistency=True) as session: collection = client.db.collection - collection.update_one({'_id': 1}, {'$set': {'x': 10}}, session=session) - secondary_c = collection.with_options( - read_preference=ReadPreference.SECONDARY) + collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) + secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) # A secondary read waits for replication of the write. - secondary_c.find_one({'_id': 1}, session=session) + secondary_c.find_one({"_id": 1}, session=session) If `causal_consistency` is True (the default), read operations that use the session are causally after previous read and write operations. 
Using a causally consistent session, an application can read its own writes and is guaranteed monotonic reads, even when reading from replica set secondaries. -.. mongodoc:: causal-consistency +.. seealso:: The MongoDB documentation on `causal-consistency `_. .. _transactions-ref: Transactions ============ +.. versionadded:: 3.7 + MongoDB 4.0 adds support for transactions on replica set primaries. A transaction is associated with a :class:`ClientSession`. To start a transaction on a session, use :meth:`ClientSession.start_transaction` in a with-statement. @@ -57,96 +56,178 @@ with client.start_session() as session: with session.start_transaction(): orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) + inventory.update_one( + {"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, + session=session, + ) Upon normal completion of ``with session.start_transaction()`` block, the transaction automatically calls :meth:`ClientSession.commit_transaction`. If the block exits with an exception, the transaction automatically calls :meth:`ClientSession.abort_transaction`. -For multi-document transactions, you can only specify read/write (CRUD) -operations on existing collections. For example, a multi-document transaction -cannot include a create or drop collection/index operations, including an +In general, multi-document transactions only support read/write (CRUD) +operations on existing collections. However, MongoDB 4.4 adds support for +creating collections and indexes with some limitations, including an insert operation that would result in the creation of a new collection. +For a complete description of all the supported and unsupported operations +see the `MongoDB server's documentation for transactions +`_. A session may only have a single active transaction at a time, multiple transactions on the same session can be executed in sequence. -.. versionadded:: 3.7 - Sharded Transactions ^^^^^^^^^^^^^^^^^^^^ +.. versionadded:: 3.9 + PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB -4.2. Sharded transactions have the same API as replica set transactions. +>=4.2. Sharded transactions have the same API as replica set transactions. When running a transaction against a sharded cluster, the session is pinned to the mongos server selected for the first operation in the transaction. All subsequent operations that are part of the same transaction are routed to the same mongos server. When the transaction is completed, by running either commitTransaction or abortTransaction, the session is unpinned. -.. versionadded:: 3.9 +.. seealso:: The MongoDB documentation on `transactions `_. + +.. _snapshot-reads-ref: + +Snapshot Reads +============== + +.. versionadded:: 3.12 -.. mongodoc:: transactions +MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by +passing the ``snapshot`` option to +:meth:`~pymongo.mongo_client.MongoClient.start_session`. +If ``snapshot`` is True, all read operations that use this session read data +from the same snapshot timestamp. The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. 
+ with client.start_session(snapshot=True) as session: + order = orders.find_one({"sku": "abc123"}, session=session) + inventory = inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.collection.Collection.find` +- :meth:`~pymongo.collection.Collection.find_one` +- :meth:`~pymongo.collection.Collection.aggregate` +- :meth:`~pymongo.collection.Collection.count_documents` +- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) Classes ======= """ +from __future__ import annotations + import collections -import os -import sys +import time import uuid +from collections.abc import Mapping as _Mapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Mapping, + MutableMapping, + NoReturn, + Optional, + Type, + TypeVar, +) from bson.binary import Binary from bson.int64 import Int64 -from bson.py3compat import abc, integer_types, reraise_instance from bson.son import SON from bson.timestamp import Timestamp - -from pymongo import monotonic -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WTimeoutError) +from pymongo import _csot +from pymongo.cursor import _ConnectionManager +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.pool import Connection + from pymongo.server import Server + from pymongo.typings import ClusterTime, _Address + -class SessionOptions(object): +class SessionOptions: """Options for a new :class:`ClientSession`. :Parameters: - - `causal_consistency` (optional): If True (the default), read - operations are causally ordered within the session. + - `causal_consistency` (optional): If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. - `default_transaction_options` (optional): The default TransactionOptions to use for transactions started on this session. + - `snapshot` (optional): If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. 
""" - def __init__(self, - causal_consistency=True, - default_transaction_options=None): + + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> None: + if snapshot: + if causal_consistency: + raise ConfigurationError("snapshot reads do not support causal_consistency=True") + causal_consistency = False + elif causal_consistency is None: + causal_consistency = True self._causal_consistency = causal_consistency if default_transaction_options is not None: if not isinstance(default_transaction_options, TransactionOptions): raise TypeError( "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" % - (default_transaction_options,)) + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) + ) self._default_transaction_options = default_transaction_options + self._snapshot = snapshot @property - def causal_consistency(self): + def causal_consistency(self) -> bool: """Whether causal consistency is configured.""" return self._causal_consistency @property - def default_transaction_options(self): + def default_transaction_options(self) -> Optional[TransactionOptions]: """The default TransactionOptions to use for transactions started on this session. @@ -154,10 +235,18 @@ def default_transaction_options(self): """ return self._default_transaction_options + @property + def snapshot(self) -> Optional[bool]: + """Whether snapshot reads are configured. + + .. versionadded:: 3.12 + """ + return self._snapshot + -class TransactionOptions(object): +class TransactionOptions: """Options for :meth:`ClientSession.start_transaction`. - + :Parameters: - `read_concern` (optional): The :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. @@ -182,54 +271,63 @@ class TransactionOptions(object): .. versionadded:: 3.7 """ - def __init__(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: self._read_concern = read_concern self._write_concern = write_concern self._read_preference = read_preference self._max_commit_time_ms = max_commit_time_ms if read_concern is not None: if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % - (read_concern,)) + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) if write_concern is not None: if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % - (write_concern,)) + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) if not write_concern.acknowledged: raise ConfigurationError( "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,)) + f": {write_concern!r}" + ) if read_preference is not None: if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." 
% (read_preference,)) - if max_commit_time_ms is not None: - if not isinstance(max_commit_time_ms, integer_types): raise TypeError( - "max_commit_time_ms must be an integer or None") + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError("max_commit_time_ms must be an integer or None") @property - def read_concern(self): + def read_concern(self) -> Optional[ReadConcern]: """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" return self._read_concern @property - def write_concern(self): + def write_concern(self) -> Optional[WriteConcern]: """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" return self._write_concern @property - def read_preference(self): - """This transaction's :class:`~pymongo.read_preferences.ReadPreference`. - """ + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" return self._read_preference @property - def max_commit_time_ms(self): + def max_commit_time_ms(self) -> Optional[int]: """The maxTimeMS to use when running a commitTransaction command. .. versionadded:: 3.9 @@ -237,7 +335,9 @@ def max_commit_time_ms(self): return self._max_commit_time_ms -def _validate_session_write_concern(session, write_concern): +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: """Validate that an explicit session is not used with an unack'ed write. Returns the session to use for the next operation. @@ -253,21 +353,27 @@ def _validate_session_write_concern(session, write_concern): return None else: raise ConfigurationError( - 'Explicit sessions are incompatible with ' - 'unacknowledged write concern: %r' % ( - write_concern,)) + "Explicit sessions are incompatible with " + f"unacknowledged write concern: {write_concern!r}" + ) return session -class _TransactionContext(object): +class _TransactionContext: """Internal transaction context manager for start_transaction.""" - def __init__(self, session): + + def __init__(self, session: ClientSession): self.__session = session - def __enter__(self): + def __enter__(self) -> _TransactionContext: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: if self.__session.in_transaction: if exc_val is None: self.__session.commit_transaction() @@ -275,7 +381,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.__session.abort_transaction() -class _TxnState(object): +class _TxnState: NONE = 1 STARTING = 2 IN_PROGRESS = 3 @@ -284,42 +390,78 @@ class _TxnState(object): ABORTED = 6 -class _Transaction(object): +class _Transaction: """Internal class to hold transaction information in a ClientSession.""" - def __init__(self, opts): + + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): self.opts = opts self.state = _TxnState.NONE self.sharded = False - self.pinned_address = None + self.pinned_address: Optional[_Address] = None + self.conn_mgr: Optional[_ConnectionManager] = None self.recovery_token = None + self.attempt = 0 + self.client = client - def active(self): + def active(self) -> bool: return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) - def reset(self): + def starting(self) -> bool: + 
return self.state == _TxnState.STARTING + + @property + def pinned_conn(self) -> Optional[Connection]: + if self.active() and self.conn_mgr: + return self.conn_mgr.conn + return None + + def pin(self, server: Server, conn: Connection) -> None: + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + conn.pin_txn() + self.conn_mgr = _ConnectionManager(conn, False) + + def unpin(self) -> None: + self.pinned_address = None + if self.conn_mgr: + self.conn_mgr.close() + self.conn_mgr = None + + def reset(self) -> None: + self.unpin() self.state = _TxnState.NONE self.sharded = False - self.pinned_address = None self.recovery_token = None + self.attempt = 0 + def __del__(self) -> None: + if self.conn_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.conn_mgr) + self.conn_mgr = None -def _reraise_with_unknown_commit(exc): + +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: """Re-raise an exception with the UnknownTransactionCommitResult label.""" exc._add_error_label("UnknownTransactionCommitResult") - reraise_instance(exc, trace=sys.exc_info()[2]) + raise -def _max_time_expired_error(exc): +def _max_time_expired_error(exc: PyMongoError) -> bool: """Return true if exc is a MaxTimeMSExpired error.""" return isinstance(exc, OperationFailure) and exc.code == 50 # From the transactions spec, all the retryable writes errors plus # WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset([ - 64, # WriteConcernFailed - 50, # MaxTimeMSExpired -]) +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( + [ + 64, # WriteConcernFailed + 50, # MaxTimeMSExpired + ] +) # From the Convenient API for Transactions spec, with_transaction must # halt retries after 120 seconds. @@ -328,95 +470,127 @@ def _max_time_expired_error(exc): _WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 -def _within_time_limit(start_time): +def _within_time_limit(start_time: float) -> bool: """Are we within the with_transaction retry limit?""" - return monotonic.time() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + + +_T = TypeVar("_T") +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + + +class ClientSession: + """A session for ordering sequential operations. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + They can only be used by one thread or process at a time. A single + :class:`ClientSession` cannot be used to run multiple operations + concurrently. + + Should not be initialized directly by application developers - to create a + :class:`ClientSession`, call + :meth:`~pymongo.mongo_client.MongoClient.start_session`. + """ -class ClientSession(object): - """A session for ordering sequential operations.""" - def __init__(self, client, server_session, options, authset, implicit): + def __init__( + self, + client: MongoClient, + server_session: Any, + options: SessionOptions, + implicit: bool, + ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. 
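Tying the session pieces together: ``SessionOptions`` above rejects ``snapshot=True`` combined with ``causal_consistency=True``, and application code obtains a ``ClientSession`` from ``MongoClient.start_session``. A short sketch, assuming a reachable MongoDB 5.0+ deployment and illustrative names:

.. code-block:: python

    from pymongo import MongoClient
    from pymongo.errors import ConfigurationError

    client = MongoClient()

    try:
        client.start_session(snapshot=True, causal_consistency=True)
    except ConfigurationError:
        pass  # The two options are mutually exclusive, as validated above.

    with client.start_session(snapshot=True) as session:
        # Every read on this session sees the same snapshot timestamp.
        client.test.things.find_one({}, session=session)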
- self._client = client + self._client: MongoClient = client self._server_session = server_session self._options = options - self._authset = authset - self._cluster_time = None - self._operation_time = None + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None + self._snapshot_time = None # Is this an implicitly created session? self._implicit = implicit - self._transaction = _Transaction(None) + self._transaction = _Transaction(None, client) - def end_session(self): + def end_session(self) -> None: """Finish this session. If a transaction has started, abort it. It is an error to use the session after the session has ended. """ self._end_session(lock=True) - def _end_session(self, lock): + def _end_session(self, lock: bool) -> None: if self._server_session is not None: try: if self.in_transaction: self.abort_transaction() + # It's possible we're still pinned here when the transaction + # is in the committed state when the session is discarded. + self._unpin() finally: self._client._return_server_session(self._server_session, lock) self._server_session = None - def _check_ended(self): + def _check_ended(self) -> None: if self._server_session is None: raise InvalidOperation("Cannot use ended session") - def __enter__(self): + def __enter__(self) -> ClientSession: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self): + def client(self) -> MongoClient: """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ return self._client @property - def options(self): + def options(self) -> SessionOptions: """The :class:`SessionOptions` this session was created with.""" return self._options @property - def session_id(self): + def session_id(self) -> Mapping[str, Any]: """A BSON document, the opaque server session identifier.""" self._check_ended() return self._server_session.session_id @property - def cluster_time(self): + def cluster_time(self) -> Optional[ClusterTime]: """The cluster time returned by the last operation executed in this session. """ return self._cluster_time @property - def operation_time(self): + def operation_time(self) -> Optional[Timestamp]: """The operation time returned by the last operation executed in this session. """ return self._operation_time - def _inherit_option(self, name, val): + def _inherit_option(self, name: str, val: _T) -> _T: """Return the inherited TransactionOption value.""" if val: return val txn_opts = self.options.default_transaction_options - val = txn_opts and getattr(txn_opts, name) - if val: - return val + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val return getattr(self.client, name) - def with_transaction(self, callback, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def with_transaction( + self, + callback: Callable[[ClientSession], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: """Execute a callback in a transaction. 
This method starts a transaction on this session, executes ``callback`` @@ -445,7 +619,7 @@ def callback(session, custom_arg, custom_kwarg=None): In the event of an exception, ``with_transaction`` may retry the commit or the entire transaction, therefore ``callback`` may be invoked multiple times by a single call to ``with_transaction``. Developers - should be mindful of this possiblity when writing a ``callback`` that + should be mindful of this possibility when writing a ``callback`` that modifies application state or has any other side-effects. Note that even when the ``callback`` is invoked multiple times, ``with_transaction`` ensures that the transaction will be committed @@ -459,6 +633,10 @@ def callback(session, custom_arg, custom_kwarg=None): however, ``with_transaction`` will return without taking further action. + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + Consequently, the ``callback`` must not attempt to execute multiple + operations concurrently. + When ``callback`` raises an exception, ``with_transaction`` automatically aborts the current transaction. When ``callback`` or :meth:`~ClientSession.commit_transaction` raises an exception that @@ -498,19 +676,19 @@ def callback(session, custom_arg, custom_kwarg=None): .. versionadded:: 3.9 """ - start_time = monotonic.time() + start_time = time.monotonic() while True: - self.start_transaction( - read_concern, write_concern, read_preference, - max_commit_time_ms) + self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) try: ret = callback(self) except Exception as exc: if self.in_transaction: self.abort_transaction() - if (isinstance(exc, PyMongoError) and - exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if ( + isinstance(exc, PyMongoError) + and exc.has_error_label("TransientTransactionError") + and _within_time_limit(start_time) + ): # Retry the entire transaction. continue raise @@ -523,14 +701,17 @@ def callback(session, custom_arg, custom_kwarg=None): try: self.commit_transaction() except PyMongoError as exc: - if (exc.has_error_label("UnknownTransactionCommitResult") - and _within_time_limit(start_time) - and not _max_time_expired_error(exc)): + if ( + exc.has_error_label("UnknownTransactionCommitResult") + and _within_time_limit(start_time) + and not _max_time_expired_error(exc) + ): # Retry the commit. continue - if (exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if exc.has_error_label("TransientTransactionError") and _within_time_limit( + start_time + ): # Retry the entire transaction. break raise @@ -538,8 +719,13 @@ def callback(session, custom_arg, custom_kwarg=None): # Commit succeeded. return ret - def start_transaction(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def start_transaction( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> ContextManager: """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. 
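# Illustrative sketch, not part of the patch above: typical use of the
# convenient transactions API implemented by with_transaction. The URI,
# database/collection names, and documents are assumed for the example.
from pymongo import MongoClient
from pymongo.read_concern import ReadConcern
from pymongo.write_concern import WriteConcern

client = MongoClient()

def callback(session):
    # Every operation in the callback must run on the given session.
    orders = session.client.db.orders
    inventory = session.client.db.inventory
    orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
    inventory.update_one(
        {"sku": "abc123", "qty": {"$gte": 100}},
        {"$inc": {"qty": -100}},
        session=session,
    )

with client.start_session() as session:
    # with_transaction starts and commits the transaction, retrying on
    # TransientTransactionError / UnknownTransactionCommitResult for up to
    # _WITH_TRANSACTION_RETRY_TIME_LIMIT (120) seconds, as implemented above.
    session.with_transaction(
        callback,
        read_concern=ReadConcern("local"),
        write_concern=WriteConcern("majority"),
    )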
@@ -551,32 +737,34 @@ def start_transaction(self, read_concern=None, write_concern=None, """ self._check_ended() + if self.options.snapshot: + raise InvalidOperation("Transactions are not supported in snapshot sessions") + if self.in_transaction: raise InvalidOperation("Transaction already in progress") read_concern = self._inherit_option("read_concern", read_concern) write_concern = self._inherit_option("write_concern", write_concern) - read_preference = self._inherit_option( - "read_preference", read_preference) + read_preference = self._inherit_option("read_preference", read_preference) if max_commit_time_ms is None: opts = self.options.default_transaction_options if opts: max_commit_time_ms = opts.max_commit_time_ms self._transaction.opts = TransactionOptions( - read_concern, write_concern, read_preference, max_commit_time_ms) + read_concern, write_concern, read_preference, max_commit_time_ms + ) self._transaction.reset() self._transaction.state = _TxnState.STARTING self._start_retryable_write() return _TransactionContext(self) - def commit_transaction(self): + def commit_transaction(self) -> None: """Commit a multi-statement transaction. .. versionadded:: 3.7 """ self._check_ended() - retry = False state = self._transaction.state if state is _TxnState.NONE: raise InvalidOperation("No transaction started") @@ -585,16 +773,14 @@ def commit_transaction(self): self._transaction.state = _TxnState.COMMITTED_EMPTY return elif state is _TxnState.ABORTED: - raise InvalidOperation( - "Cannot call commitTransaction after calling abortTransaction") + raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") elif state is _TxnState.COMMITTED: # We're explicitly retrying the commit, move the state back to # "in progress" so that in_transaction returns true. self._transaction.state = _TxnState.IN_PROGRESS - retry = True try: - self._finish_transaction_with_retry("commitTransaction", retry) + self._finish_transaction_with_retry("commitTransaction") except ConnectionFailure as exc: # We do not know if the commit was successfully applied on the # server or if it satisfied the provided write concern, set the @@ -616,7 +802,7 @@ def commit_transaction(self): finally: self._transaction.state = _TxnState.COMMITTED - def abort_transaction(self): + def abort_transaction(self) -> None: """Abort a multi-statement transaction. .. versionadded:: 3.7 @@ -633,76 +819,59 @@ def abort_transaction(self): elif state is _TxnState.ABORTED: raise InvalidOperation("Cannot call abortTransaction twice") elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): - raise InvalidOperation( - "Cannot call abortTransaction after calling commitTransaction") + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") try: - self._finish_transaction_with_retry("abortTransaction", False) + self._finish_transaction_with_retry("abortTransaction") except (OperationFailure, ConnectionFailure): # The transactions spec says to ignore abortTransaction errors. pass finally: self._transaction.state = _TxnState.ABORTED + self._unpin() - def _finish_transaction_with_retry(self, command_name, explict_retry): + def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: """Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". - - `explict_retry`: True when this is an explict commit retry attempt, - ie the application called session.commit_transaction() twice. 
""" - # This can be refactored with MongoClient._retry_with_session. - try: - return self._finish_transaction(command_name, explict_retry) - except ServerSelectionTimeoutError: - raise - except ConnectionFailure as exc: - try: - return self._finish_transaction(command_name, True) - except ServerSelectionTimeoutError: - # Raise the original error so the application can infer that - # an attempt was made. - raise exc - except OperationFailure as exc: - if exc.code not in _RETRYABLE_ERROR_CODES: - raise - try: - return self._finish_transaction(command_name, True) - except ServerSelectionTimeoutError: - # Raise the original error so the application can infer that - # an attempt was made. - raise exc - def _finish_transaction(self, command_name, retrying): + def func( + _session: Optional[ClientSession], conn: Connection, _retryable: bool + ) -> dict[str, Any]: + return self._finish_transaction(conn, command_name) + + return self._client._retry_internal(func, self, None, retryable=True) + + def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: + self._transaction.attempt += 1 opts = self._transaction.opts + assert opts wc = opts.write_concern cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": - if opts.max_commit_time_ms: - cmd['maxTimeMS'] = opts.max_commit_time_ms + if opts.max_commit_time_ms and _csot.get_timeout() is None: + cmd["maxTimeMS"] = opts.max_commit_time_ms # Transaction spec says that after the initial commit attempt, # subsequent commitTransaction commands should be upgraded to use # w:"majority" and set a default value of 10 seconds for wtimeout. - if retrying: + if self._transaction.attempt > 1: + assert wc wc_doc = wc.document wc_doc["w"] = "majority" wc_doc.setdefault("wtimeout", 10000) wc = WriteConcern(**wc_doc) if self._transaction.recovery_token: - cmd['recoveryToken'] = self._transaction.recovery_token + cmd["recoveryToken"] = self._transaction.recovery_token - with self._client._socket_for_writes(self) as sock_info: - return self._client.admin._command( - sock_info, - cmd, - session=self, - write_concern=wc, - parse_write_concern_error=True) + return self._client.admin._command( + conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) - def _advance_cluster_time(self, cluster_time): + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: """Internal cluster time helper.""" if self._cluster_time is None: self._cluster_time = cluster_time @@ -710,7 +879,7 @@ def _advance_cluster_time(self, cluster_time): if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: self._cluster_time = cluster_time - def advance_cluster_time(self, cluster_time): + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: """Update the cluster time for this session. :Parameters: @@ -718,14 +887,13 @@ def advance_cluster_time(self, cluster_time): :data:`~pymongo.client_session.ClientSession.cluster_time` from another `ClientSession` instance. 
""" - if not isinstance(cluster_time, abc.Mapping): - raise TypeError( - "cluster_time must be a subclass of collections.Mapping") + if not isinstance(cluster_time, _Mapping): + raise TypeError("cluster_time must be a subclass of collections.Mapping") if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) - def _advance_operation_time(self, operation_time): + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: """Internal operation time helper.""" if self._operation_time is None: self._operation_time = operation_time @@ -733,7 +901,7 @@ def _advance_operation_time(self, operation_time): if operation_time > self._operation_time: self._operation_time = operation_time - def advance_operation_time(self, operation_time): + def advance_operation_time(self, operation_time: Timestamp) -> None: """Update the operation time for this session. :Parameters: @@ -742,26 +910,31 @@ def advance_operation_time(self, operation_time): another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance " - "of bson.timestamp.Timestamp") + raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) - def _process_response(self, reply): + def _process_response(self, reply: Mapping[str, Any]) -> None: """Process a response to a command that was run with this session.""" - self._advance_cluster_time(reply.get('$clusterTime')) - self._advance_operation_time(reply.get('operationTime')) + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct if self.in_transaction and self._transaction.sharded: - recovery_token = reply.get('recoveryToken') + recovery_token = reply.get("recoveryToken") if recovery_token: self._transaction.recovery_token = recovery_token @property - def has_ended(self): + def has_ended(self) -> bool: """True if this session is finished.""" return self._server_session is None @property - def in_transaction(self): + def in_transaction(self) -> bool: """True if this session has an active multi-statement transaction. .. 
versionadded:: 3.10 @@ -769,81 +942,127 @@ def in_transaction(self): return self._transaction.active() @property - def _pinned_address(self): + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: """The mongos address this transaction was created on.""" if self._transaction.active(): return self._transaction.pinned_address return None - def _pin_mongos(self, server): - """Pin this session to the given mongos Server.""" - self._transaction.sharded = True - self._transaction.pinned_address = server.description.address + @property + def _pinned_connection(self) -> Optional[Connection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn - def _unpin_mongos(self): - """Unpin this session from any pinned mongos address.""" - self._transaction.pinned_address = None + def _pin(self, server: Server, conn: Connection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) - def _txn_read_preference(self): + def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: """Return read preference of this transaction or None.""" if self.in_transaction: + assert self._transaction.opts return self._transaction.opts.read_preference return None - def _apply_to(self, command, is_retryable, read_preference): + def _materialize(self) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session() + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: Connection, + ) -> None: self._check_ended() + self._materialize() + if self.options.snapshot: + self._update_read_concern(command, conn) - self._server_session.last_use = monotonic.time() - command['lsid'] = self._server_session.session_id - - if not self.in_transaction: - self._transaction.reset() + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id if is_retryable: - command['txnNumber'] = self._server_session.transaction_id + command["txnNumber"] = self._server_session.transaction_id return if self.in_transaction: if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( - 'read preference in a transaction must be primary, not: ' - '%r' % (read_preference,)) + f"read preference in a transaction must be primary, not: {read_preference!r}" + ) if self._transaction.state == _TxnState.STARTING: # First command begins a new transaction. 
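                # For illustration (command name and field values assumed),
                # the first command in a transaction then reaches the server
                # shaped roughly like:
                #   {"find": "coll", "lsid": {"id": <Binary UUID>},
                #    "txnNumber": Int64(1), "startTransaction": True,
                #    "readConcern": {"level": "majority"}, "autocommit": False}
                # Later commands repeat lsid, txnNumber, and autocommit but
                # omit startTransaction and readConcern.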
self._transaction.state = _TxnState.IN_PROGRESS - command['startTransaction'] = True + command["startTransaction"] = True + assert self._transaction.opts if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document - else: - rc = {} - - if (self.options.causal_consistency - and self.operation_time is not None): - rc['afterClusterTime'] = self.operation_time + if rc: + command["readConcern"] = rc + self._update_read_concern(command, conn) - if rc: - command['readConcern'] = rc + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False - command['txnNumber'] = self._server_session.transaction_id - command['autocommit'] = False - - def _start_retryable_write(self): + def _start_retryable_write(self) -> None: self._check_ended() self._server_session.inc_transaction_id() + def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: Connection) -> None: + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time + if self.options.snapshot: + if conn.max_wire_version < 13: + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" + if self._snapshot_time is not None: + rc["atClusterTime"] = self._snapshot_time + + def __copy__(self) -> NoReturn: + raise TypeError("A ClientSession cannot be copied, create a new session instead") -class _ServerSession(object): - def __init__(self, pool_id): + +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self) -> None: + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self) -> None: + self.dirty = True + + def inc_transaction_id(self) -> None: + self.started_retryable_write = True + + +class _ServerSession: + def __init__(self, generation: int): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. - self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} - self.last_use = monotonic.time() + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} + self.last_use = time.monotonic() self._transaction_id = 0 self.dirty = False - self.pool_id = pool_id + self.generation = generation - def mark_dirty(self): + def mark_dirty(self) -> None: """Mark this session as dirty. A server session is marked dirty when a command fails with a network @@ -851,18 +1070,18 @@ def mark_dirty(self): """ self.dirty = True - def timed_out(self, session_timeout_minutes): - idle_seconds = monotonic.time() - self.last_use + def timed_out(self, session_timeout_minutes: float) -> bool: + idle_seconds = time.monotonic() - self.last_use # Timed out if we have less than a minute to live. return idle_seconds > (session_timeout_minutes - 1) * 60 @property - def transaction_id(self): + def transaction_id(self) -> Int64: """Positive 64-bit integer.""" return Int64(self._transaction_id) - def inc_transaction_id(self): + def inc_transaction_id(self) -> None: self._transaction_id += 1 @@ -871,21 +1090,22 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. 
""" - def __init__(self, *args, **kwargs): - super(_ServerSessionPool, self).__init__(*args, **kwargs) - self.pool_id = 0 - def reset(self): - self.pool_id += 1 + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self.generation = 0 + + def reset(self) -> None: + self.generation += 1 self.clear() - def pop_all(self): + def pop_all(self) -> list[_ServerSession]: ids = [] while self: ids.append(self.pop().session_id) return ids - def get_server_session(self, session_timeout_minutes): + def get_server_session(self, session_timeout_minutes: float) -> _ServerSession: # Although the Driver Sessions Spec says we only clear stale sessions # in return_server_session, PyMongo can't take a lock when returning # sessions from a __del__ method (like in Cursor.__die), so it can't @@ -899,20 +1119,24 @@ def get_server_session(self, session_timeout_minutes): if not s.timed_out(session_timeout_minutes): return s - return _ServerSession(self.pool_id) + return _ServerSession(self.generation) - def return_server_session(self, server_session, session_timeout_minutes): - self._clear_stale(session_timeout_minutes) - if not server_session.timed_out(session_timeout_minutes): - self.return_server_session_no_lock(server_session) + def return_server_session( + self, server_session: _ServerSession, session_timeout_minutes: Optional[float] + ) -> None: + if session_timeout_minutes is not None: + self._clear_stale(session_timeout_minutes) + if server_session.timed_out(session_timeout_minutes): + return + self.return_server_session_no_lock(server_session) - def return_server_session_no_lock(self, server_session): + def return_server_session_no_lock(self, server_session: _ServerSession) -> None: # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. - if server_session.pool_id == self.pool_id and not server_session.dirty: + if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) - def _clear_stale(self, session_timeout_minutes): + def _clear_stale(self, session_timeout_minutes: float) -> None: # Clear stale sessions. The least recently used are on the right. while self: if self[-1].timed_out(session_timeout_minutes): diff --git a/pymongo/collation.py b/pymongo/collation.py index 873d603336..e940868e59 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -14,13 +14,16 @@ """Tools for working with `collations`_. -.. _collations: http://userguide.icu-project.org/collation/concepts +.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ """ +from __future__ import annotations + +from typing import Any, Mapping, Optional, Union from pymongo import common -class CollationStrength(object): +class CollationStrength: """ An enum that defines values for `strength` on a :class:`~pymongo.collation.Collation`. @@ -42,16 +45,16 @@ class CollationStrength(object): """Differentiate unicode code point (characters are exactly identical).""" -class CollationAlternate(object): +class CollationAlternate: """ An enum that defines values for `alternate` on a :class:`~pymongo.collation.Collation`. """ - NON_IGNORABLE = 'non-ignorable' + NON_IGNORABLE = "non-ignorable" """Spaces and punctuation are treated as base characters.""" - SHIFTED = 'shifted' + SHIFTED = "shifted" """Spaces and punctuation are *not* considered base characters. 
Spaces and punctuation are distinguished regardless when the @@ -61,36 +64,36 @@ class CollationAlternate(object): """ -class CollationMaxVariable(object): +class CollationMaxVariable: """ An enum that defines values for `max_variable` on a :class:`~pymongo.collation.Collation`. """ - PUNCT = 'punct' + PUNCT = "punct" """Both punctuation and spaces are ignored.""" - SPACE = 'space' + SPACE = "space" """Spaces alone are ignored.""" -class CollationCaseFirst(object): +class CollationCaseFirst: """ An enum that defines values for `case_first` on a :class:`~pymongo.collation.Collation`. """ - UPPER = 'upper' + UPPER = "upper" """Sort uppercase characters first.""" - LOWER = 'lower' + LOWER = "lower" """Sort lowercase characters first.""" - OFF = 'off' + OFF = "off" """Default for locale or collation strength.""" -class Collation(object): +class Collation: """Collation :Parameters: @@ -151,46 +154,45 @@ class Collation(object): __slots__ = ("__document",) - def __init__(self, locale, - caseLevel=None, - caseFirst=None, - strength=None, - numericOrdering=None, - alternate=None, - maxVariable=None, - normalization=None, - backwards=None, - **kwargs): - locale = common.validate_string('locale', locale) - self.__document = {'locale': locale} + def __init__( + self, + locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any, + ) -> None: + locale = common.validate_string("locale", locale) + self.__document: dict[str, Any] = {"locale": locale} if caseLevel is not None: - self.__document['caseLevel'] = common.validate_boolean( - 'caseLevel', caseLevel) + self.__document["caseLevel"] = common.validate_boolean("caseLevel", caseLevel) if caseFirst is not None: - self.__document['caseFirst'] = common.validate_string( - 'caseFirst', caseFirst) + self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) if strength is not None: - self.__document['strength'] = common.validate_integer( - 'strength', strength) + self.__document["strength"] = common.validate_integer("strength", strength) if numericOrdering is not None: - self.__document['numericOrdering'] = common.validate_boolean( - 'numericOrdering', numericOrdering) + self.__document["numericOrdering"] = common.validate_boolean( + "numericOrdering", numericOrdering + ) if alternate is not None: - self.__document['alternate'] = common.validate_string( - 'alternate', alternate) + self.__document["alternate"] = common.validate_string("alternate", alternate) if maxVariable is not None: - self.__document['maxVariable'] = common.validate_string( - 'maxVariable', maxVariable) + self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) if normalization is not None: - self.__document['normalization'] = common.validate_boolean( - 'normalization', normalization) + self.__document["normalization"] = common.validate_boolean( + "normalization", normalization + ) if backwards is not None: - self.__document['backwards'] = common.validate_boolean( - 'backwards', backwards) + self.__document["backwards"] = common.validate_boolean("backwards", backwards) self.__document.update(kwargs) @property - def document(self): + def document(self) -> dict[str, Any]: """The document representation of this collation. .. 
note:: @@ -199,27 +201,26 @@ def document(self): """ return self.__document.copy() - def __repr__(self): + def __repr__(self) -> str: document = self.document - return 'Collation(%s)' % ( - ', '.join('%s=%r' % (key, document[key]) for key in document),) + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): return self.document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -def validate_collation_or_none(value): +def validate_collation_or_none( + value: Optional[Union[Mapping[str, Any], Collation]] +) -> Optional[dict[str, Any]]: if value is None: return None if isinstance(value, Collation): return value.document if isinstance(value, dict): return value - raise TypeError( - 'collation must be a dict, an instance of collation.Collation, ' - 'or None.') + raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") diff --git a/pymongo/collection.py b/pymongo/collection.py index 8bedb02243..9630b83d91 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -13,57 +13,96 @@ # limitations under the License. """Collection level utilities for Mongo.""" - -import datetime -import warnings - -from bson.code import Code +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.objectid import ObjectId -from bson.py3compat import (_unicode, - abc, - integer_types, - string_type) from bson.raw_bson import RawBSONDocument -from bson.codec_options import CodecOptions from bson.son import SON -from pymongo import (common, - helpers, - message) -from pymongo.aggregation import (_CollectionAggregationCommand, - _CollectionRawAggregationCommand) -from pymongo.bulk import BulkOperationBuilder, _Bulk -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import ORDERED_TYPES -from pymongo.collation import validate_collation_or_none +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot, common, helpers, message +from pymongo.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.bulk import _Bulk from pymongo.change_stream import CollectionChangeStream +from pymongo.collation import validate_collation_or_none +from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor +from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidName, - InvalidOperation, - OperationFailure) -from pymongo.helpers import (_check_write_command_response, - _raise_last_error) +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import IndexModel -from pymongo.read_preferences import ReadPreference -from pymongo.results import (BulkWriteResult, - DeleteResult, - InsertOneResult, - InsertManyResult, - UpdateResult) +from 
pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, +) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline from pymongo.write_concern import WriteConcern -_NO_OBJ_ERROR = "No matching object found" -_UJOIN = u"%s.%s" -_FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] -class ReturnDocument(object): + +class ReturnDocument: """An enum used with :meth:`~pymongo.collection.Collection.find_one_and_replace` and :meth:`~pymongo.collection.Collection.find_one_and_update`. """ + BEFORE = False """Return the original document before it was updated/replaced, or ``None`` if no document matches the query. @@ -72,19 +111,38 @@ class ReturnDocument(object): """Return the updated/replaced or inserted document.""" -class Collection(common.BaseObject): - """A Mongo collection. - """ - - def __init__(self, database, name, create=False, codec_options=None, - read_preference=None, write_concern=None, read_concern=None, - session=None, **kwargs): +if TYPE_CHECKING: + + import bson + from pymongo.aggregation import _AggregationCommand + from pymongo.client_session import ClientSession + from pymongo.collation import Collation + from pymongo.database import Database + from pymongo.pool import Connection + from pymongo.read_concern import ReadConcern + from pymongo.server import Server + + +class Collection(common.BaseObject, Generic[_DocumentType]): + """A Mongo collection.""" + + def __init__( + self, + database: Database[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> None: """Get / create a Mongo collection. Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - collection name. Any additional keyword arguments will be used + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used as options passed to the create command. See :meth:`~pymongo.database.Database.create_collection` for valid options. @@ -114,14 +172,23 @@ def __init__(self, database, name, create=False, codec_options=None, default) database.read_concern is used. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. If a collation is provided, - it will be passed to the create collection command. This option is - only supported on MongoDB 3.4 and above. + it will be passed to the create collection command. - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. 
versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -145,70 +212,77 @@ def __init__(self, database, name, create=False, codec_options=None, collection.__my_collection__ - .. versionchanged:: 2.2 - Removed deprecated argument: options - - .. versionadded:: 2.1 - uuid_subtype attribute - - .. mongodoc:: collections + .. seealso:: The MongoDB documentation on `collections `_. """ - super(Collection, self).__init__( + super().__init__( codec_options or database.codec_options, read_preference or database.read_preference, write_concern or database.write_concern, - read_concern or database.read_concern) - - if not isinstance(name, string_type): - raise TypeError("name must be an instance " - "of %s" % (string_type.__name__,)) + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") if not name or ".." in name: raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or - name.startswith("$cmd")): - raise InvalidName("collection names must not " - "contain '$': %r" % name) + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " - "or end with '.': %r" % name) + raise InvalidName("collection names must not start or end with '.': %r" % name) if "\x00" in name: - raise InvalidName("collection names must not contain the " - "null character") - collation = validate_collation_or_none(kwargs.pop('collation', None)) - - self.__database = database - self.__name = _unicode(name) - self.__full_name = _UJOIN % (self.__database.name, self.__name) - if create or kwargs or collation: - self.__create(kwargs, collation, session) + raise InvalidName("collection names must not contain the null character") + collation = validate_collation_or_none(kwargs.pop("collation", None)) + self.__database: Database[_DocumentType] = database + self.__name = name + self.__full_name = f"{self.__database.name}.{self.__name}" self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict) - - def _socket_for_reads(self, session): - return self.__database.client._socket_for_reads( - self._read_preference_for(session), session) - - def _socket_for_writes(self, session): - return self.__database.client._socket_for_writes(session) - - def _command(self, sock_info, command, slave_ok=False, - read_preference=None, - codec_options=None, check=True, allowable_errors=None, - read_concern=None, - write_concern=None, - collation=None, - session=None, - retryable_write=False, - user_fields=None): + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + encrypted_fields = kwargs.pop("encryptedFields", None) + if create or kwargs or collation: + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self.__create( + 
_esc_coll_name(encrypted_fields, name), opts, None, session, qev2_required=True + ) + self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self.__create(name, kwargs, collation, session) + + def _conn_for_reads( + self, session: ClientSession + ) -> ContextManager[tuple[Connection, _ServerMode]]: + return self.__database.client._conn_for_reads(self._read_preference_for(session), session) + + def _conn_for_writes(self, session: Optional[ClientSession]) -> ContextManager[Connection]: + return self.__database.client._conn_for_writes(session) + + def _command( + self, + conn: Connection, + command: MutableMapping[str, Any], + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions] = None, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + user_fields: Optional[Any] = None, + ) -> Mapping[str, Any]: """Internal command helper. :Parameters: - - `sock_info` - A SocketInfo instance. - - `command` - The command itself, as a SON instance. - - `slave_ok`: whether to set the SlaveOkay wire protocol bit. + - `conn` - A Connection instance. + - `command` - The command itself, as a :class:`~bson.son.SON` instance. + - `read_preference` (optional) - The read preference to use. - `codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - `check`: raise OperationFailure if there are errors @@ -216,8 +290,7 @@ def _command(self, sock_info, command, slave_ok=False, - `read_concern` (optional) - An instance of :class:`~pymongo.read_concern.ReadConcern`. - `write_concern`: An instance of - :class:`~pymongo.write_concern.WriteConcern`. This option is only - valid for MongoDB 3.4 and above. + :class:`~pymongo.write_concern.WriteConcern`. - `collation` (optional) - An instance of :class:`~pymongo.collation.Collation`. - `session` (optional): a @@ -232,10 +305,9 @@ def _command(self, sock_info, command, slave_ok=False, The result document. """ with self.__database.client._tmp_session(session) as s: - return sock_info.command( + return conn.command( self.__database.name, command, - slave_ok, read_preference or self._read_preference_for(session), codec_options or self.codec_options, check, @@ -247,23 +319,45 @@ def _command(self, sock_info, command, slave_ok=False, session=s, client=self.__database.client, retryable_write=retryable_write, - user_fields=user_fields) + user_fields=user_fields, + ) + + def __create( + self, + name: str, + options: MutableMapping[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + encrypted_fields: Optional[Mapping[str, Any]] = None, + qev2_required: bool = False, + ) -> None: + """Sends a create command with the given options.""" + cmd: SON[str, Any] = SON([("create", name)]) + if encrypted_fields: + cmd["encryptedFields"] = encrypted_fields - def __create(self, options, collation, session): - """Sends a create command with the given options. 
- """ - cmd = SON([("create", self.__name)]) if options: if "size" in options: options["size"] = float(options["size"]) cmd.update(options) - with self._socket_for_writes(session) as sock_info: + with self._conn_for_writes(session) as conn: + if qev2_required and conn.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. " + f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + conn, + cmd, + read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), - collation=collation, session=session) + collation=collation, + session=session, + ) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. @@ -271,37 +365,48 @@ def __getattr__(self, name): :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): - full_name = _UJOIN % (self.__name, name) + if name.startswith("_"): + full_name = f"{self.__name}.{name}" raise AttributeError( - "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % ( - name, full_name, full_name)) + f"Collection has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." + ) return self.__getitem__(name) - def __getitem__(self, name): - return Collection(self.__database, - _UJOIN % (self.__name, name), - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern) + def __getitem__(self, name: str) -> Collection[_DocumentType]: + return Collection( + self.__database, + f"{self.__name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) - def __repr__(self): - return "Collection(%r, %r)" % (self.__database, self.__name) + def __repr__(self) -> str: + return f"Collection({self.__database!r}, {self.__name!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): - return (self.__database == other.database and - self.__name == other.name) + return self.__database == other.database and self.__name == other.name return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other + def __hash__(self) -> int: + return hash((self.__database, self.__name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + "Collection objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) + @property - def full_name(self): + def full_name(self) -> str: """The full name of this :class:`Collection`. The full name is of the form `database_name.collection_name`. @@ -309,19 +414,24 @@ def full_name(self): return self.__full_name @property - def name(self): + def name(self) -> str: """The name of this :class:`Collection`.""" return self.__name @property - def database(self): + def database(self) -> Database[_DocumentType]: """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. 
""" return self.__database - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: """Get a clone of this collection changing the specified settings. >>> coll1.read_preference @@ -351,79 +461,26 @@ def with_options(self, codec_options=None, read_preference=None, default) the :attr:`read_concern` of this :class:`Collection` is used. """ - return Collection(self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) - - def initialize_unordered_bulk_op(self, bypass_document_validation=False): - """**DEPRECATED** - Initialize an unordered batch of write operations. - - Operations will be performed on the server in arbitrary order, - possibly in parallel. All operations will be attempted. - - :Parameters: - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`unordered_bulk` for examples. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 2.7 - """ - warnings.warn("initialize_unordered_bulk_op is deprecated", - DeprecationWarning, stacklevel=2) - return BulkOperationBuilder(self, False, bypass_document_validation) - - def initialize_ordered_bulk_op(self, bypass_document_validation=False): - """**DEPRECATED** - Initialize an ordered batch of write operations. - - Operations will be performed on the server serially, in the - order provided. If an error occurs all remaining operations - are aborted. - - :Parameters: - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`ordered_bulk` for examples. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. 
versionadded:: 2.7 - """ - warnings.warn("initialize_ordered_bulk_op is deprecated", - DeprecationWarning, stacklevel=2) - return BulkOperationBuilder(self, True, bypass_document_validation) - - def bulk_write(self, requests, ordered=True, - bypass_document_validation=False, session=None): + return Collection( + self.__database, + self.__name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + @_csot.apply + def bulk_write( + self, + requests: Sequence[_WriteOp[_DocumentType]], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + let: Optional[Mapping] = None, + ) -> BulkWriteResult: """Send a batch of write operations to the server. Requests are passed as a list of write operation instances ( @@ -437,8 +494,8 @@ def bulk_write(self, requests, ordered=True, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')} - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} >>> # DeleteMany, UpdateOne, and UpdateMany are also available. ... >>> from pymongo import InsertOne, DeleteOne, ReplaceOne @@ -456,9 +513,9 @@ def bulk_write(self, requests, ordered=True, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')} - {u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')} - {u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} :Parameters: - `requests`: A list of write operations (see examples above). @@ -472,6 +529,12 @@ def bulk_write(self, requests, ordered=True, ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: An instance of :class:`~pymongo.results.BulkWriteResult`. @@ -481,6 +544,10 @@ def bulk_write(self, requests, ordered=True, .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. 
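# Illustrative sketch, not part of the patch above: the new ``let`` and
# ``comment`` parameters in use. The collection, data, and variable name are
# assumed for the example; ``$$target`` refers to the ``let`` binding, which
# requires a server that supports ``let`` on update commands (MongoDB 5.0+).
from pymongo import MongoClient, UpdateOne

coll = MongoClient().db.test
result = coll.bulk_write(
    [UpdateOne({"$expr": {"$eq": ["$qty", "$$target"]}}, {"$inc": {"qty": 1}})],
    ordered=False,
    let={"target": 100},
    comment="bulk-example",
)
print(result.modified_count)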
@@ -491,12 +558,12 @@ def bulk_write(self, requests, ordered=True, """ common.validate_list("requests", requests) - blk = _Bulk(self, ordered, bypass_document_validation) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) for request in requests: try: request._add_to_bulk(blk) except AttributeError: - raise TypeError("%r is not a valid request" % (request,)) + raise TypeError(f"{request!r} is not a valid request") from None write_concern = self._write_concern_for(session) bulk_api_result = blk.execute(write_concern, session) @@ -504,152 +571,54 @@ def bulk_write(self, requests, ordered=True, return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _legacy_write(self, sock_info, name, cmd, op_id, - bypass_doc_val, func, *args): - """Internal legacy unacknowledged write helper.""" - # Cannot have both unacknowledged write and bypass document validation. - if bypass_doc_val and sock_info.max_wire_version >= 4: - raise OperationFailure("Cannot set bypass_document_validation with" - " unacknowledged write concern") - listeners = self.database.client._event_listeners - publish = listeners.enabled_for_commands - - if publish: - start = datetime.datetime.now() - args = args + (sock_info.compression_context,) - rqst_id, msg, max_size = func(*args) - if publish: - duration = datetime.datetime.now() - start - listeners.publish_command_start( - cmd, self.__database.name, rqst_id, sock_info.address, op_id) - start = datetime.datetime.now() - try: - result = sock_info.legacy_write(rqst_id, msg, max_size, False) - except Exception as exc: - if publish: - dur = (datetime.datetime.now() - start) + duration - if isinstance(exc, OperationFailure): - details = exc.details - # Succeed if GLE was successful and this is a write error. - if details.get("ok") and "n" in details: - reply = message._convert_write_result( - name, cmd, details) - listeners.publish_command_success( - dur, reply, name, rqst_id, sock_info.address, op_id) - raise - else: - details = message._convert_exception(exc) - listeners.publish_command_failure( - dur, details, name, rqst_id, sock_info.address, op_id) - raise - if publish: - if result is not None: - reply = message._convert_write_result(name, cmd, result) - else: - # Comply with APM spec. - reply = {'ok': 1} - duration = (datetime.datetime.now() - start) + duration - listeners.publish_command_success( - duration, reply, name, rqst_id, sock_info.address, op_id) - return result - def _insert_one( - self, doc, ordered, - check_keys, manipulate, write_concern, op_id, bypass_doc_val, - session): + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: bool, + session: Optional[ClientSession], + comment: Optional[Any] = None, + ) -> Any: """Internal helper for inserting a single document.""" - if manipulate: - doc = self.__database._apply_incoming_manipulators(doc, self) - if not isinstance(doc, RawBSONDocument) and '_id' not in doc: - doc['_id'] = ObjectId() - doc = self.__database._apply_incoming_copying_manipulators(doc, - self) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - command = SON([('insert', self.name), - ('ordered', ordered), - ('documents', [doc])]) - if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document - - def _insert_command(session, sock_info, retryable_write): - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_INSERT. 
- return self._legacy_write( - sock_info, 'insert', command, op_id, - bypass_doc_val, message.insert, self.__full_name, - [doc], check_keys, False, write_concern.document, False, - self.__write_response_codec_options) - - if bypass_doc_val and sock_info.max_wire_version >= 4: - command['bypassDocumentValidation'] = True - - result = sock_info.command( + command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) + if comment is not None: + command["comment"] = comment + + def _insert_command( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> None: + if bypass_doc_val: + command["bypassDocumentValidation"] = True + + result = conn.command( self.__database.name, command, write_concern=write_concern, codec_options=self.__write_response_codec_options, - check_keys=check_keys, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) - self.__database.client._retryable_write( - acknowledged, _insert_command, session) + self.__database.client._retryable_write(acknowledged, _insert_command, session) if not isinstance(doc, RawBSONDocument): - return doc.get('_id') - - def _insert(self, docs, ordered=True, check_keys=True, - manipulate=False, write_concern=None, op_id=None, - bypass_doc_val=False, session=None): - """Internal insert helper.""" - if isinstance(docs, abc.Mapping): - return self._insert_one( - docs, ordered, check_keys, manipulate, write_concern, op_id, - bypass_doc_val, session) - - ids = [] - - if manipulate: - def gen(): - """Generator that applies SON manipulators to each document - and adds _id if necessary. - """ - _db = self.__database - for doc in docs: - # Apply user-configured SON manipulators. This order of - # operations is required for backwards compatibility, - # see PYTHON-709. - doc = _db._apply_incoming_manipulators(doc, self) - if not (isinstance(doc, RawBSONDocument) or '_id' in doc): - doc['_id'] = ObjectId() - - doc = _db._apply_incoming_copying_manipulators(doc, self) - ids.append(doc['_id']) - yield doc - else: - def gen(): - """Generator that only tracks existing _ids.""" - for doc in docs: - # Don't inflate RawBSONDocument by touching fields. - if not isinstance(doc, RawBSONDocument): - ids.append(doc.get('_id')) - yield doc - - write_concern = write_concern or self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_doc_val) - blk.ops = [(message._INSERT, doc) for doc in gen()] - try: - blk.execute(write_concern, session=session) - except BulkWriteError as bwe: - _raise_last_error(bwe.details) - return ids - - def insert_one(self, document, bypass_document_validation=False, - session=None): + return doc.get("_id") + return None + + def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: """Insert a single document. >>> db.test.count_documents({'x': 1}) @@ -658,7 +627,7 @@ def insert_one(self, document, bypass_document_validation=False, >>> result.inserted_id ObjectId('54f112defba522406c9cc208') >>> db.test.find_one({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')} + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} :Parameters: - `document`: The document to insert. Must be a mutable mapping @@ -669,6 +638,8 @@ def insert_one(self, document, bypass_document_validation=False, ``False``. 
- `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.InsertOneResult`. @@ -678,6 +649,9 @@ def insert_one(self, document, bypass_document_validation=False, .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -688,18 +662,31 @@ def insert_one(self, document, bypass_document_validation=False, """ common.validate_is_document_type("document", document) if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] write_concern = self._write_concern_for(session) return InsertOneResult( - self._insert(document, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - session=session), - write_concern.acknowledged) - - def insert_many(self, documents, ordered=True, - bypass_document_validation=False, session=None): + self._insert_one( + document, + ordered=True, + write_concern=write_concern, + op_id=None, + bypass_doc_val=bypass_document_validation, + session=session, + comment=comment, + ), + write_concern.acknowledged, + ) + + @_csot.apply + def insert_many( + self, + documents: Iterable[Union[_DocumentType, RawBSONDocument]], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertManyResult: """Insert an iterable of documents. >>> db.test.count_documents({}) @@ -722,6 +709,8 @@ def insert_many(self, documents, ordered=True, ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.results.InsertManyResult`. @@ -731,6 +720,9 @@ def insert_many(self, documents, ordered=True, .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -739,131 +731,175 @@ def insert_many(self, documents, ordered=True, .. 
versionadded:: 3.0 """ - if not isinstance(documents, abc.Iterable) or not documents: + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): raise TypeError("documents must be a non-empty list") - inserted_ids = [] - def gen(): + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: """A generator that validates documents and handles _ids.""" for document in documents: common.validate_is_document_type("document", document) if not isinstance(document, RawBSONDocument): if "_id" not in document: - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] inserted_ids.append(document["_id"]) yield (message._INSERT, document) write_concern = self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_document_validation) - blk.ops = [doc for doc in gen()] + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) - def _update(self, sock_info, criteria, document, upsert=False, - check_keys=True, multi=False, manipulate=False, - write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - session=None, retryable_write=False): + def _update( + self, + conn: Connection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) - if manipulate: - document = self.__database._fix_incoming(document, self) collation = validate_collation_or_none(collation) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - update_doc = SON([('q', criteria), - ('u', document), - ('multi', multi), - ('upsert', upsert)]) + update_doc: SON[str, Any] = SON( + [("q", criteria), ("u", document), ("multi", multi), ("upsert", upsert)] + ) if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - elif not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - update_doc['collation'] = collation + update_doc["collation"] = collation if array_filters is not None: - if sock_info.max_wire_version < 6: - raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use array_filters.') - elif not acknowledged: - raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") else: - update_doc['arrayFilters'] = array_filters - command = SON([('update', self.name), - ('ordered', ordered), - ('updates', [update_doc])]) - if not write_concern.is_server_default: - 
command['writeConcern'] = write_concern.document - - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_UPDATE. - return self._legacy_write( - sock_info, 'update', command, op_id, - bypass_doc_val, message.update, self.__full_name, upsert, - multi, criteria, document, False, write_concern.document, - check_keys, self.__write_response_codec_options) - + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers._index_document(hint) + update_doc["hint"] = hint + command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment # Update command. - if bypass_doc_val and sock_info.max_wire_version >= 4: - command['bypassDocumentValidation'] = True + if bypass_doc_val: + command["bypassDocumentValidation"] = True # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. - result = sock_info.command( + result = conn.command( self.__database.name, command, write_concern=write_concern, codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write).copy() + retryable_write=retryable_write, + ).copy() _check_write_command_response(result) # Add the updatedExisting field for compatibility. - if result.get('n') and 'upserted' not in result: - result['updatedExisting'] = True + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True else: - result['updatedExisting'] = False + result["updatedExisting"] = False # MongoDB >= 2.6.0 returns the upsert _id in an array # element. Break it out for backward compatibility. 
- if 'upserted' in result: - result['upserted'] = result['upserted'][0]['_id'] + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] if not acknowledged: return None return result def _update_retryable( - self, criteria, document, upsert=False, - check_keys=True, multi=False, manipulate=False, - write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - session=None): + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" - def _update(session, sock_info, retryable_write): + + def _update( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: return self._update( - sock_info, criteria, document, upsert=upsert, - check_keys=check_keys, multi=multi, manipulate=manipulate, - write_concern=write_concern, op_id=op_id, ordered=ordered, - bypass_doc_val=bypass_doc_val, collation=collation, - array_filters=array_filters, session=session, - retryable_write=retryable_write) + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _update, session) - - def replace_one(self, filter, replacement, upsert=False, - bypass_document_validation=False, collation=None, - session=None): + (write_concern or self.write_concern).acknowledged and not multi, _update, session + ) + + def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: """Replace a single document matching the filter. >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} >>> result = db.test.replace_one({'x': 1}, {'y': 1}) >>> result.matched_count 1 @@ -872,7 +908,7 @@ def replace_one(self, filter, replacement, upsert=False, >>> for doc in db.test.find({}): ... print(doc) ... - {u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} The *upsert* option can be used to insert a new document if a matching document does not exist. 
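The hunks above thread the new `comment` and `let` options through the write path and `hint` through the internal update helpers. A minimal usage sketch of how these surface on the public API, assuming a local mongod; the collection and index names are illustrative, and `let` on update commands additionally requires a server that supports it (MongoDB 5.0+):

    from pymongo import MongoClient

    client = MongoClient()  # assumed local test deployment
    coll = client.test.test

    # comment is forwarded verbatim on the insert command, so it appears
    # in the database profiler and server logs for tracing.
    coll.insert_one({"x": 1}, comment="traced insert")

    # let binds $$target for use in the filter; hint names an index to
    # support the query predicate (string name or key-pattern list).
    coll.create_index("x")
    coll.replace_one(
        {"$expr": {"$eq": ["$x", "$$target"]}},
        {"x": 2},
        let={"target": 1},
        hint="x_1",
        comment="traced replace",
    )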
@@ -885,7 +921,7 @@ def replace_one(self, filter, replacement, upsert=False, >>> result.upserted_id ObjectId('54f11e5c8891e756a6e1abd4') >>> db.test.find_one({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')} + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} :Parameters: - `filter`: A query that matches the document to replace. @@ -896,51 +932,80 @@ def replace_one(self, filter, replacement, upsert=False, write to opt-out of document level validation. Default is ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support + Added bypass_document_validation support. .. versionadded:: 3.0 """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) - + if let is not None: + common.validate_is_mapping("let", let) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, replacement, upsert, + filter, + replacement, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, session=session), - write_concern.acknowledged) - - def update_one(self, filter, update, upsert=False, - bypass_document_validation=False, - collation=None, array_filters=None, session=None): + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: """Update a single document matching the filter. >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 1 @@ -949,9 +1014,22 @@ def update_one(self, filter, update, upsert=False, >>> for doc in db.test.find(): ... print(doc) ... 
- {u'x': 4, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. + + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} :Parameters: - `filter`: A query that matches the document to update. @@ -962,58 +1040,86 @@ def update_one(self, filter, update, upsert=False, write to opt-out of document level validation. Default is ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - + Added the ability to accept a pipeline as the ``update``. .. versionchanged:: 3.6 - Added the `array_filters` and ``session`` parameters. - + Added the ``array_filters`` and ``session`` parameters. .. versionchanged:: 3.4 - Added the `collation` option. - + Added the ``collation`` option. .. versionchanged:: 3.2 - Added bypass_document_validation support + Added ``bypass_document_validation`` support. .. 
versionadded:: 3.0 """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, + filter, + update, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - session=session), - write_concern.acknowledged) - - def update_many(self, filter, update, upsert=False, array_filters=None, - bypass_document_validation=False, collation=None, - session=None): + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: """Update one or more documents that match the filter. >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 3 @@ -1022,9 +1128,9 @@ def update_many(self, filter, update, upsert=False, array_filters=None, >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 4, u'_id': 0} - {u'x': 4, u'_id': 1} - {u'x': 4, u'_id': 2} + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} :Parameters: - `filter`: A query that matches the documents to update. @@ -1035,59 +1141,93 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write to opt-out of document level validation. Default is ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. 
versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 Added ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support + Added bypass_document_validation support. .. versionadded:: 3.0 """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, multi=True, + filter, + update, + upsert, + multi=True, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - session=session), - write_concern.acknowledged) - - def drop(self, session=None): + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def drop( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.7 :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. 
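Since `update_many` keeps `array_filters` alongside the new `comment` option, a short sketch of the combination may help; the data and names here are hypothetical, under the same local-deployment assumption:

    from pymongo import MongoClient

    client = MongoClient()  # assumed local test deployment
    scores = client.test.scores
    scores.insert_one({"_id": 1, "grades": [95, 102, 90, 150]})

    # $[g] rewrites only the array elements matched by the arrayFilters
    # identifier, here the two grades above 100.
    result = scores.update_many(
        {"grades": {"$exists": True}},
        {"$set": {"grades.$[g]": 100}},
        array_filters=[{"g": {"$gt": 100}}],
        comment="cap out-of-range grades",
    )
    print(result.modified_count)  # 1: one matching document was modified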
@@ -1099,72 +1239,114 @@ def drop(self, session=None): self.codec_options, self.read_preference, self.write_concern, - self.read_concern) - dbo.drop_collection(self.__name, session=session) + self.read_concern, + ) + dbo.drop_collection( + self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) def _delete( - self, sock_info, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, session=None, retryable_write=False): + self, + conn: Connection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - delete_doc = SON([('q', criteria), - ('limit', int(not multi))]) + delete_doc = SON([("q", criteria), ("limit", int(not multi))]) collation = validate_collation_or_none(collation) if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - elif not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - delete_doc['collation'] = collation - command = SON([('delete', self.name), - ('ordered', ordered), - ('deletes', [delete_doc])]) - if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document - - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_DELETE. - return self._legacy_write( - sock_info, 'delete', command, op_id, - False, message.delete, self.__full_name, criteria, - False, write_concern.document, - self.__write_response_codec_options, - int(not multi)) + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers._index_document(hint) + delete_doc["hint"] = hint + command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Delete command. 
- result = sock_info.command( + result = conn.command( self.__database.name, command, write_concern=write_concern, codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) return result def _delete_retryable( - self, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, session=None): + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: """Internal delete helper.""" - def _delete(session, sock_info, retryable_write): + + def _delete( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Mapping[str, Any]: return self._delete( - sock_info, criteria, multi, - write_concern=write_concern, op_id=op_id, ordered=ordered, - collation=collation, session=session, - retryable_write=retryable_write) + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _delete, session) - - def delete_one(self, filter, collation=None, session=None): + (write_concern or self.write_concern).acknowledged and not multi, _delete, session + ) + + def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1178,31 +1360,60 @@ def delete_one(self, filter, collation=None, session=None): :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. 
versionadded:: 3.0 """ write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, False, + filter, + False, write_concern=write_concern, - collation=collation, session=session), - write_concern.acknowledged) - - def delete_many(self, filter, collation=None, session=None): + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1216,31 +1427,54 @@ def delete_many(self, filter, collation=None, session=None): :Parameters: - `filter`: A query that matches the documents to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionadded:: 3.0 """ write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, True, + filter, + True, write_concern=write_concern, - collation=collation, session=session), - write_concern.acknowledged) - - def find_one(self, filter=None, *args, **kwargs): + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for @@ -1264,20 +1498,19 @@ def find_one(self, filter=None, *args, **kwargs): are the same as the arguments to :meth:`find`. >>> collection.find_one(max_time_ms=100) + """ - if (filter is not None and not - isinstance(filter, abc.Mapping)): + if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} - cursor = self.find(filter, *args, **kwargs) for result in cursor.limit(-1): return result return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: """Query the database. - The `filter` argument is a prototype document that all results + The `filter` argument is a query document that all results must match. 
For example: >>> db.test.find({"hello": "world"}) @@ -1297,9 +1530,9 @@ def find(self, *args, **kwargs): this :class:`Collection`. :Parameters: - - `filter` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. - `projection` (optional): a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If `projection` is a list "_id" will @@ -1329,7 +1562,7 @@ def find(self, *args, **kwargs): marks the final document position. If more data is received iteration of the cursor will continue from the last document received. For details, see the `tailable cursor documentation - `_. + `_. - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result of this find call will be a tailable cursor with the await flag set. The server will wait for a few seconds after returning the @@ -1346,15 +1579,12 @@ def find(self, *args, **kwargs): - `allow_partial_results` (optional): if True, mongos will return partial results if some shards are down instead of returning an error. - - `oplog_replay` (optional): If True, set the oplogReplay query - flag. + - `oplog_replay` (optional): **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. - `batch_size` (optional): Limits the number of documents returned in a single batch. - - `manipulate` (optional): **DEPRECATED** - If True (the default), - apply any outgoing SON manipulators before returning. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `return_key` (optional): If True, return only the index keys in each document. - `show_record_id` (optional): If True, adds a field ``$recordId`` in @@ -1389,9 +1619,12 @@ def find(self, *args, **kwargs): interpret and trace the operation in the server logs and in profile data. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the cursor. - - `modifiers` (optional): **DEPRECATED** - A dict specifying - additional MongoDB query modifiers. Use the keyword arguments listed - above instead. + - `allow_disk_use` (optional): if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. The option has no effect if + MongoDB can satisfy the specified sort using an index, or if the + blocking sort requires less memory than the 100 MiB limit. This + option is only supported on MongoDB 4.4 and above. .. note:: There are a number of caveats to using :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: @@ -1409,57 +1642,61 @@ def find(self, *args, **kwargs): connection will be closed and discarded without being returned to the connection pool. + .. versionchanged:: 4.0 + Removed the ``modifiers`` option. + Empty projections (eg {} or []) are passed to the server as-is, + rather than the previous behavior which substituted in a + projection of ``{"_id": 1}``. This means that an empty projection + will now return the entire document, not just the ``"_id"`` field. + + .. versionchanged:: 3.11 + Added the ``allow_disk_use`` option. + Deprecated the ``oplog_replay`` option. Support for this option is + deprecated in MongoDB 4.4. 
The query engine now automatically + optimizes queries against the oplog without requiring this + option to be set. + .. versionchanged:: 3.7 - Deprecated the `snapshot` option, which is deprecated in MongoDB + Deprecated the ``snapshot`` option, which is deprecated in MongoDB 3.6 and removed in MongoDB 4.0. - Deprecated the `max_scan` option. Support for this option is - deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server - side execution time. - + Deprecated the ``max_scan`` option. Support for this option is + deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit + server-side execution time. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.5 - Added the options `return_key`, `show_record_id`, `snapshot`, - `hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`. - Deprecated the option `modifiers`. + Added the options ``return_key``, ``show_record_id``, ``snapshot``, + ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and + ``comment``. + Deprecated the ``modifiers`` option. .. versionchanged:: 3.4 - Support the `collation` option. + Added support for the ``collation`` option. .. versionchanged:: 3.0 - Changed the parameter names `spec`, `fields`, `timeout`, and - `partial` to `filter`, `projection`, `no_cursor_timeout`, and - `allow_partial_results` respectively. - Added the `cursor_type`, `oplog_replay`, and `modifiers` options. - Removed the `network_timeout`, `read_preference`, `tag_sets`, - `secondary_acceptable_latency_ms`, `max_scan`, `snapshot`, - `tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay - parameters. Removed `compile_re` option: PyMongo now always + Changed the parameter names ``spec``, ``fields``, ``timeout``, and + ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, + and ``allow_partial_results`` respectively. + Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` + options. + Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, + ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, + ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and + slave_okay parameters. + Removed ``compile_re`` option: PyMongo now always represents BSON regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular - expression object. Soft deprecated the `manipulate` option. - - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. versionadded:: 2.3 - The `tag_sets` and `secondary_acceptable_latency_ms` parameters. - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 - - .. mongodoc:: find + expression object. + Soft deprecated the ``manipulate`` option. + .. seealso:: The MongoDB documentation on `find `_. """ return Cursor(self, *args, **kwargs) - def find_raw_batches(self, *args, **kwargs): + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: """Query the database and retrieve batches of raw BSON. Similar to the :meth:`find` method but returns a @@ -1475,145 +1712,72 @@ def find_raw_batches(self, *args, **kwargs): >>> for batch in cursor: ... print(bson.decode_all(batch)) - .. 
note:: find_raw_batches does not support sessions or auto - encryption. + .. note:: find_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + + Added session support. .. versionadded:: 3.6 """ - # OP_MSG with document stream returns is required to support - # sessions. - if "session" in kwargs: - raise ConfigurationError( - "find_raw_batches does not support sessions") - # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "find_raw_batches does not support auto encryption") - + raise InvalidOperation("find_raw_batches does not support auto encryption") return RawBatchCursor(self, *args, **kwargs) - def parallel_scan(self, num_cursors, session=None, **kwargs): - """**DEPRECATED**: Scan this entire collection in parallel. - - Returns a list of up to ``num_cursors`` cursors that can be iterated - concurrently. As long as the collection is not modified during - scanning, each document appears once in one of the cursors result - sets. - - For example, to process each document in a collection using some - thread-safe ``process_document()`` function: - - >>> def process_cursor(cursor): - ... for document in cursor: - ... # Some thread-safe processing function: - ... process_document(document) - >>> - >>> # Get up to 4 cursors. - ... - >>> cursors = collection.parallel_scan(4) - >>> threads = [ - ... threading.Thread(target=process_cursor, args=(cursor,)) - ... for cursor in cursors] - >>> - >>> for thread in threads: - ... thread.start() - >>> - >>> for thread in threads: - ... thread.join() - >>> - >>> # All documents have now been processed. - - The :meth:`parallel_scan` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :Parameters: - - `num_cursors`: the number of cursors to return - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs`: additional options for the parallelCollectionScan - command can be passed as keyword arguments. - - .. note:: Requires server version **>= 2.5.5**. - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Added back support for arbitrary keyword arguments. MongoDB 3.4 - adds support for maxTimeMS as an option to the - parallelCollectionScan command. - - .. versionchanged:: 3.0 - Removed support for arbitrary keyword arguments, since - the parallelCollectionScan command has no optional arguments. - """ - warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove " - "the parallelCollectionScan command.", - DeprecationWarning, stacklevel=2) - cmd = SON([('parallelCollectionScan', self.__name), - ('numCursors', num_cursors)]) - cmd.update(kwargs) - - with self._socket_for_reads(session) as (sock_info, slave_ok): - # We call sock_info.command here directly, instead of - # calling self._command to avoid using an implicit session. 
- result = sock_info.command( - self.__database.name, - cmd, - slave_ok, - self._read_preference_for(session), - self.codec_options, - read_concern=self.read_concern, - parse_write_concern_error=True, - session=session, - client=self.__database.client) - - cursors = [] - for cursor in result['cursors']: - cursors.append(CommandCursor( - self, cursor['cursor'], sock_info.address, - session=session, explicit_session=session is not None)) - - return cursors - - def _count(self, cmd, collation=None, session=None): - """Internal count helper.""" - def _cmd(session, server, sock_info, slave_ok): - res = self._command( - sock_info, - cmd, - slave_ok, - allowable_errors=["ns missing"], - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session) - if res.get("errmsg", "") == "ns missing": - return 0 - return int(res["n"]) - - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + def _count_cmd( + self, + session: Optional[ClientSession], + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: SON[str, Any], + collation: Optional[Collation], + ) -> int: + """Internal count command helper.""" + # XXX: "ns missing" checks can be removed when we drop support for + # MongoDB 3.0, see SERVER-17051. + res = self._command( + conn, + cmd, + read_preference=read_preference, + allowable_errors=["ns missing"], + codec_options=self.__write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + if res.get("errmsg", "") == "ns missing": + return 0 + return int(res["n"]) def _aggregate_one_result( - self, sock_info, slave_ok, cmd, collation=None, session=None): + self, + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: SON[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + ) -> Optional[Mapping[str, Any]]: """Internal helper to run an aggregate that returns a single result.""" result = self._command( - sock_info, + conn, cmd, - slave_ok, + read_preference, + allowable_errors=[26], # Ignore NamespaceNotFound. codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, - session=session) - batch = result['cursor']['firstBatch'] + session=session, + ) + # cursor will not be present for NamespaceNotFound errors. + if "cursor" not in result: + return None + batch = result["cursor"]["firstBatch"] return batch[0] if batch else None - def estimated_document_count(self, **kwargs): + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: """Get an estimate of the number of documents in this collection using collection metadata. @@ -1627,18 +1791,44 @@ def estimated_document_count(self, **kwargs): operation to run, in milliseconds. :Parameters: + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + .. versionadded:: 3.7 + .. 
_count: https://mongodb.com/docs/manual/reference/command/count/ """ - if 'session' in kwargs: - raise ConfigurationError( - 'estimated_document_count does not support sessions') - cmd = SON([('count', self.__name)]) - cmd.update(kwargs) - return self._count(cmd) + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: SON[str, Any] = SON([("count", self.__name)]) + cmd.update(kwargs) + return self._count_cmd(session, conn, read_preference, cmd, collation=None) - def count_documents(self, filter, session=None, **kwargs): + return self._retryable_non_cursor_read(_cmd, None) + + def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: """Count the number of documents in this collection. .. note:: For a fast count of the total documents in a collection see @@ -1656,12 +1846,10 @@ def count_documents(self, filter, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (string or list of tuples): The index to use. Specify either the index name as a string or the index specification as a list of tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - This option is only supported on MongoDB 3.6 and above. The :meth:`count_documents` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -1679,135 +1867,114 @@ def count_documents(self, filter, session=None, **kwargs): | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | +-------------+-------------------------------------+ - $expr requires MongoDB 3.6+ - :Parameters: - `filter` (required): A query document that selects which documents to count in the collection. Can be an empty document to count all documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionadded:: 3.7 - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. 
_$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ """ - pipeline = [{'$match': filter}] - if 'skip' in kwargs: - pipeline.append({'$skip': kwargs.pop('skip')}) - if 'limit' in kwargs: - pipeline.append({'$limit': kwargs.pop('limit')}) - pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}}) - cmd = SON([('aggregate', self.__name), - ('pipeline', pipeline), - ('cursor', {})]) - if "hint" in kwargs and not isinstance(kwargs["hint"], string_type): + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) + if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, slave_ok): - result = self._aggregate_one_result( - sock_info, slave_ok, cmd, collation, session) + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) if not result: return 0 - return result['n'] + return result["n"] + + return self._retryable_non_cursor_read(_cmd, session) + + def _retryable_non_cursor_read( + self, + func: Callable[[Optional[ClientSession], Server, Connection, Optional[_ServerMode]], T], + session: Optional[ClientSession], + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self.__database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s) + + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] - def count(self, filter=None, session=None, **kwargs): - """**DEPRECATED** - Get the number of documents in this collection. + :Parameters: + - `indexes`: A list of :class:`~pymongo.operations.IndexModel` + instances. + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. - The :meth:`count` method is deprecated and **not** supported in a - transaction. Please use :meth:`count_documents` or - :meth:`estimated_document_count` instead. - All optional count parameters should be passed as keyword arguments - to this method. Valid options include: - - `skip` (int): The number of matching documents to skip before - returning results. 
- - `limit` (int): The maximum number of documents to count. A limit - of 0 (the default) is equivalent to setting no limit. - - `maxTimeMS` (int): The maximum amount of time to allow the count - command to run, in milliseconds. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. - - `hint` (string or list of tuples): The index to use. Specify either - the index name as a string or the index specification as a list of - tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - - The :meth:`count` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - .. note:: When migrating from :meth:`count` to :meth:`count_documents` - the following query operators must be replaced: - - +-------------+-------------------------------------+ - | Operator | Replacement | - +=============+=====================================+ - | $where | `$expr`_ | - +-------------+-------------------------------------+ - | $near | `$geoWithin`_ with `$center`_ | - +-------------+-------------------------------------+ - | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | - +-------------+-------------------------------------+ - - $expr requires MongoDB 3.6+ - :Parameters: - - `filter` (optional): A query document that selects which documents - to count in the collection. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. - - .. versionchanged:: 3.7 - Deprecated. + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. .. versionchanged:: 3.6 - Added ``session`` parameter. + Added ``session`` parameter. Added support for arbitrary keyword + arguments. .. versionchanged:: 3.4 - Support the `collation` option. + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ """ - warnings.warn("count is deprecated. Use estimated_document_count or " - "count_documents instead. Please note that $where must " - "be replaced by $expr, $near must be replaced by " - "$geoWithin with $center, and $nearSphere must be " - "replaced by $geoWithin with $centerSphere", - DeprecationWarning, stacklevel=2) - cmd = SON([("count", self.__name)]) - if filter is not None: - if "query" in kwargs: - raise ConfigurationError("can't pass both filter and query") - kwargs["query"] = filter - if "hint" in kwargs and not isinstance(kwargs["hint"], string_type): - kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - return self._count(cmd, collation, session) + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return self.__create_indexes(indexes, session, **kwargs) - def create_indexes(self, indexes, session=None, **kwargs): - """Create one or more indexes on this collection. 
- - >>> from pymongo import IndexModel, ASCENDING, DESCENDING - >>> index1 = IndexModel([("hello", DESCENDING), - ... ("world", ASCENDING)], name="hello_world") - >>> index2 = IndexModel([("goodbye", DESCENDING)]) - >>> db.test.create_indexes([index1, index2]) - ["hello_world", "goodbye_-1"] + @_csot.apply + def __create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. :Parameters: - `indexes`: A list of :class:`~pymongo.operations.IndexModel` @@ -1816,93 +1983,55 @@ def create_indexes(self, indexes, session=None, **kwargs): :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: `create_indexes` uses the `createIndexes`_ command - introduced in MongoDB **2.6** and cannot be used with earlier - versions. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - .. versionadded:: 3.0 - - .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ - common.validate_list('indexes', indexes) names = [] - with self._socket_for_writes(session) as sock_info: - supports_collations = sock_info.max_wire_version >= 5 - def gen_indexes(): + with self._conn_for_writes(session) as conn: + supports_quorum = conn.max_wire_version >= 9 + + def gen_indexes() -> Iterator[Mapping[str, Any]]: for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of " - "pymongo.operations.IndexModel" % (index,)) + f"{index!r} is not an instance of pymongo.operations.IndexModel" + ) document = index.document - if "collation" in document and not supports_collations: - raise ConfigurationError( - "Must be connected to MongoDB " - "3.4+ to use collations.") names.append(document["name"]) yield document - cmd = SON([('createIndexes', self.name), - ('indexes', list(gen_indexes()))]) - cmd.update(kwargs) - self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - write_concern=self._write_concern_for(session), - session=session) - return names - def __create_index(self, keys, index_options, session, **kwargs): - """Internal create index helper. - - :Parameters: - - `keys`: a list of tuples [(key, type), (key, type), ...] - - `index_options`: a dict of index options. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. 
- """ - index_doc = helpers._index_document(keys) - index = {"key": index_doc} - collation = validate_collation_or_none( - index_options.pop('collation', None)) - index.update(index_options) - - with self._socket_for_writes(session) as sock_info: - if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - else: - index['collation'] = collation - cmd = SON([('createIndexes', self.name), ('indexes', [index])]) + cmd = SON([("createIndexes", self.name), ("indexes", list(gen_indexes()))]) cmd.update(kwargs) + if "commitQuorum" in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes" + ) + self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + conn, + cmd, + read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), - session=session) + session=session, + ) + return names - def create_index(self, keys, session=None, **kwargs): + def create_index( + self, + keys: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> str: """Creates an index on this collection. - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, - :data:`~pymongo.TEXT`). + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str`and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). To create a single key ascending index on the key ``'mike'`` we just use a string argument:: @@ -1913,7 +2042,7 @@ def create_index(self, keys, session=None, **kwargs): ascending we need to use a list of tuples:: >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... ("eliot", pymongo.ASCENDING)]) + ... "eliot"]) All optional index creation parameters should be passed as keyword arguments to this method. For example:: @@ -1925,8 +2054,9 @@ def create_index(self, keys, session=None, **kwargs): - `name`: custom name to use for this index - if none is given, a name will be generated. - - `unique`: if ``True`` creates a uniqueness constraint on the index. - - `background`: if ``True`` this index should be created in the + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the background. - `sparse`: if ``True``, omit from the index any documents that lack the indexed field. @@ -1942,13 +2072,15 @@ def create_index(self, keys, session=None, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires server version >=3.2. + a partial index. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. 
+ :class:`~pymongo.collation.Collation`. - `wildcardProjection`: Allows users to include or exclude specific - field paths from a `wildcard index`_ using the { "$**" : 1} key - pattern. Requires server version >= 4.2. + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. See the MongoDB documentation for a full list of supported options by server version. @@ -1958,18 +2090,26 @@ def create_index(self, keys, session=None, **kwargs): using the option will fail if a duplicate value is detected. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. :Parameters: - `keys`: a single key or a list of (key, direction) pairs specifying the index to create - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments - + arguments. + + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added the ``hidden`` option. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for passing maxTimeMS in kwargs. @@ -1977,75 +2117,45 @@ def create_index(self, keys, session=None, **kwargs): Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. Support the `collation` option. .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. + Added partialFilterExpression to support partial indexes. .. versionchanged:: 3.0 - Renamed `key_or_list` to `keys`. Removed the `cache_for` option. - :meth:`create_index` no longer caches index names. Removed support - for the drop_dups and bucket_size aliases. + Renamed `key_or_list` to `keys`. Removed the `cache_for` option. + :meth:`create_index` no longer caches index names. Removed support + for the drop_dups and bucket_size aliases. - .. mongodoc:: indexes + .. seealso:: The MongoDB documentation on `indexes `_. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ """ - keys = helpers._index_list(keys) - name = kwargs.setdefault("name", helpers._gen_index_name(keys)) cmd_options = {} if "maxTimeMS" in kwargs: cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") - self.__create_index(keys, kwargs, session, **cmd_options) - return name - - def ensure_index(self, key_or_list, cache_for=300, **kwargs): - """**DEPRECATED** - Ensures that an index exists on this collection. - - .. versionchanged:: 3.0 - **DEPRECATED** - """ - warnings.warn("ensure_index is deprecated. Use create_index instead.", - DeprecationWarning, stacklevel=2) - # The types supported by datetime.timedelta. 
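Pulling the options above together, a brief sketch; the collection and field names are illustrative::

    from pymongo import ASCENDING, DESCENDING, MongoClient

    db = MongoClient().test  # assumed handle

    # A single key defaults to ascending.
    db.sessions.create_index("user_id")

    # Per the 4.4 change noted above, bare keys may be mixed with
    # (key, direction) pairs; bare keys default to ASCENDING.
    db.sessions.create_index([("user_id", DESCENDING), "created"])

    # TTL index: documents expire 3600 seconds after `created`,
    # which must hold a UTC datetime.
    db.sessions.create_index("created", expireAfterSeconds=3600)

    # Hidden from the query planner (MongoDB 4.4+).
    db.sessions.create_index("legacy_field", hidden=True)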
- if not (isinstance(cache_for, integer_types) or - isinstance(cache_for, float)): - raise TypeError("cache_for must be an integer or float.") - - if "drop_dups" in kwargs: - kwargs["dropDups"] = kwargs.pop("drop_dups") - - if "bucket_size" in kwargs: - kwargs["bucketSize"] = kwargs.pop("bucket_size") - - keys = helpers._index_list(key_or_list) - name = kwargs.setdefault("name", helpers._gen_index_name(keys)) - - # Note that there is a race condition here. One thread could - # check if the index is cached and be preempted before creating - # and caching the index. This means multiple threads attempting - # to create the same index concurrently could send the index - # to the server two or more times. This has no practical impact - # other than wasted round trips. - if not self.__database.client._cached(self.__database.name, - self.__name, name): - self.__create_index(keys, kwargs, session=None) - self.__database.client._cache_index(self.__database.name, - self.__name, name, cache_for) - return name - return None - - def drop_indexes(self, session=None, **kwargs): + if comment is not None: + cmd_options["comment"] = comment + index = IndexModel(keys, **kwargs) + return self.__create_indexes([index], session, **cmd_options)[0] + + def drop_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: """Drops all indexes on this collection. - Can be used on non-existant collections or collections with no indexes. + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the dropIndexes command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -2054,15 +2164,22 @@ def drop_indexes(self, session=None, **kwargs): .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. - """ - self.__database.client._purge_index(self.__database.name, self.__name) + if comment is not None: + kwargs["comment"] = comment self.drop_index("*", session=session, **kwargs) - def drop_index(self, index_or_name, session=None, **kwargs): + @_csot.apply + def drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: """Drops the specified index on this collection. - Can be used on non-existant collections or collections with no + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error (e.g. trying to drop an index that does not exist). `index_or_name` can be either an index name (as returned by `create_index`), @@ -2073,19 +2190,23 @@ def drop_index(self, index_or_name, session=None, **kwargs): .. warning:: if a custom name was used on index creation (by - passing the `name` parameter to :meth:`create_index` or - :meth:`ensure_index`) the index **must** be dropped by name. + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name.
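The warning above is easy to trip over, so a minimal sketch (hypothetical collection name)::

    from pymongo import ASCENDING, MongoClient

    db = MongoClient().test  # assumed handle

    db.test.create_index([("hello", ASCENDING)], name="greeting_idx")

    # Dropping by key pattern would look for the generated name
    # "hello_1" and fail; a custom-named index must be dropped by name.
    db.test.drop_index("greeting_idx")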
:Parameters: - `index_or_name`: index (or name of index) to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the dropIndexes command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. + .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -2100,114 +2221,99 @@ if isinstance(index_or_name, list): name = helpers._gen_index_name(index_or_name) - if not isinstance(name, string_type): - raise TypeError("index_or_name must be an index name or list") + if not isinstance(name, str): + raise TypeError("index_or_name must be an instance of str or list") - self.__database.client._purge_index( - self.__database.name, self.__name, name) cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) - with self._socket_for_writes(session) as sock_info: - self._command(sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found"], - write_concern=self._write_concern_for(session), - session=session) - - def reindex(self, session=None, **kwargs): - """Rebuilds all indexes on this collection. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): optional arguments to the reIndex - command (like maxTimeMS) can be passed as keyword arguments. - - .. warning:: reindex blocks all other operations (indexes - are built in the foreground) and will be slow for large - collections. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - .. versionchanged:: 3.5 - We no longer apply this collection's write concern to this operation. - MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns - an error if we include the write concern. - """ - cmd = SON([("reIndex", self.__name)]) - cmd.update(kwargs) - with self._socket_for_writes(session) as sock_info: - return self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - session=session) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) - def list_indexes(self, session=None): + def list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. >>> for index in db.test.list_indexes(): ... print(index) ... - SON([(u'v', 1), (u'key', SON([(u'_id', 1)])), - (u'name', u'_id_'), (u'ns', u'test.test')]) + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command.
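A sketch of consuming the cursor this method returns; ``db`` is an assumed handle::

    from pymongo import MongoClient

    db = MongoClient().test  # assumed handle

    for index in db.test.list_indexes():
        # Each document resembles
        # SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]).
        print(index["name"], dict(index["key"]))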
:Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. .. versionadded:: 3.0 """ - codec_options = CodecOptions(SON) - coll = self.with_options(codec_options=codec_options, - read_preference=ReadPreference.PRIMARY) - read_pref = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) - - def _cmd(session, server, sock_info, slave_ok): + codec_options: CodecOptions = CodecOptions(SON) + coll = cast( + Collection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + explicit_session = session is not None + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: cmd = SON([("listIndexes", self.__name), ("cursor", {})]) - if sock_info.max_wire_version > 2: - with self.__database.client._tmp_session(session, False) as s: - try: - cursor = self._command(sock_info, cmd, slave_ok, - read_pref, - codec_options, - session=s)["cursor"] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes. - if exc.code != 26: - raise - cursor = {'id': 0, 'firstBatch': []} - return CommandCursor(coll, cursor, sock_info.address, - session=s, - explicit_session=session is not None) - else: - res = message._first_batch( - sock_info, self.__database.name, "system.indexes", - {"ns": self.__full_name}, 0, slave_ok, codec_options, - read_pref, cmd, - self.database.client._event_listeners) - cursor = res["cursor"] - # Note that a collection can only have 64 indexes, so there - # will never be a getMore call. - return CommandCursor(coll, cursor, sock_info.address) - - return self.__database.client._retryable_read( - _cmd, read_pref, session) + if comment is not None: + cmd["comment"] = comment - def index_information(self, session=None): + try: + cursor = self._command(conn, cmd, read_preference, codec_options, session=session)[ + "cursor" + ] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=session, + explicit_session=explicit_session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + with self.__database.client._tmp_session(session, False) as s: + return self.__database.client._retryable_read(_cmd, read_pref, s) + + def index_information( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as @@ -2221,27 +2327,237 @@ def index_information(self, session=None): like this: >>> db.test.create_index("x", unique=True) - u'x_1' + 'x_1' >>> db.test.index_information() - {u'_id_': {u'key': [(u'_id', 1)]}, - u'x_1': {u'unique': True, u'key': [(u'x', 1)]}} + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. 
versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. """ - cursor = self.list_indexes(session=session) + cursor = self.list_indexes(session=session, comment=comment) info = {} for index in cursor: - index["key"] = index["key"].items() - index = dict(index) + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 info[index.pop("name")] = index return info - def options(self, session=None): + def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + :Parameters: + - `name` (optional): If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + :Returns: + A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, read_preference=ReadPreference.PRIMARY + ) + cmd = _CollectionAggregationCommand( + coll, + CommandCursor, + pipeline, + kwargs, + explicit_session=session is not None, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self.__database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + ) + + def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :Parameters: + - `model`: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :Returns: + The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(model["definition"], model.get("name")) + return self.create_search_indexes([model], session, comment, **kwargs)[0] + + def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :Parameters: + - `models`: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. 
+ - `**kwargs` (optional): optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :Returns: + A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if comment is not None: + kwargs["comment"] = comment + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in models: + if not isinstance(index, SearchIndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" + ) + yield index.document + + cmd = SON([("createSearchIndexes", self.name), ("indexes", list(gen_indexes()))]) + cmd.update(kwargs) + + with self._conn_for_writes(session) as conn: + resp = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + return [index["name"] for index in resp["indexesCreated"]] + + def drop_search_index( + self, + name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Delete a search index by index name. + + :Parameters: + - `name`: The name of the search index to be deleted. + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): optional arguments to the dropSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = SON([("dropSearchIndex", self.__name), ("name", name)]) + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def update_search_index( + self, + name: str, + definition: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Update a search index by replacing the existing index definition with the provided definition. + + :Parameters: + - `name`: The name of the search index to be updated. + - `definition`: The new search index definition. + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): optional arguments to the updateSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = SON([("updateSearchIndex", self.__name), ("name", name), ("definition", definition)]) + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def options( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: """Get the options set on this collection. Returns a dictionary of options and their values - see @@ -2252,6 +2568,8 @@ def options(self, session=None): :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. 
+ - `comment` (optional): A user-provided comment to attach to this + command. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2261,9 +2579,11 @@ def options(self, session=None): self.codec_options, self.read_preference, self.write_concern, - self.read_concern) + self.read_concern, + ) cursor = dbo.list_collections( - session=session, filter={"name": self.__name}) + session=session, filter={"name": self.__name}, comment=comment + ) result = None for doc in cursor: @@ -2274,34 +2594,73 @@ def options(self, session=None): return {} options = result.get("options", {}) + assert options is not None if "create" in options: del options["create"] return options - def _aggregate(self, aggregation_command, pipeline, cursor_class, session, - explicit_session, **kwargs): - # Remove things that are not command options. - use_cursor = True - if "useCursor" in kwargs: - warnings.warn( - "The useCursor option is deprecated " - "and will be removed in PyMongo 4.0", - DeprecationWarning, stacklevel=2) - use_cursor = common.validate_boolean( - "useCursor", kwargs.pop("useCursor", True)) - + @_csot.apply + def _aggregate( + self, + aggregation_command: Type[_AggregationCommand], + pipeline: _Pipeline, + cursor_class: Type[CommandCursor], + session: Optional[ClientSession], + explicit_session: bool, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + if comment is not None: + kwargs["comment"] = comment cmd = aggregation_command( - self, cursor_class, pipeline, kwargs, explicit_session, - user_fields={'cursor': {'firstBatch': 1}}, use_cursor=use_cursor) - return self.__database.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(session), session, - retryable=not cmd._performs_write) + self, + cursor_class, + pipeline, + kwargs, + explicit_session, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) - def aggregate(self, pipeline, session=None, **kwargs): + return self.__database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + ) + + def aggregate( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this collection. + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Collection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + + .. note:: This method does not support the 'explain' option. Please + use `PyMongoExplain `_ + instead. An example is included in the :ref:`aggregate-examples` + documentation. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + :Parameters: + - `pipeline`: a list of aggregation pipeline stages + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `**kwargs` (optional): extra `aggregate command`_ parameters. + All optional `aggregate command`_ parameters should be passed as keyword arguments to this method. Valid options include, but are not limited to: @@ -2313,36 +2672,29 @@ def aggregate(self, pipeline, session=None, **kwargs): to run in milliseconds. - `batchSize` (int): The maximum number of documents to return per batch. 
Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor, or `useCursor` is - ``False``. + returning aggregate results using a cursor. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. - - `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0. - - The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Collection`, except when ``$out`` or ``$merge`` are used, in - which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` - is used. - - .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. An - example is included in the :ref:`aggregate-examples` documentation. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + - `comment` (optional): A user-provided comment to attach to this + command. - :Parameters: - - `pipeline`: a list of aggregation pipeline stages - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. .. versionchanged:: 3.9 Apply this collection's read concern to pipelines containing the `$out` stage when connected to MongoDB >= 4.2. @@ -2358,28 +2710,31 @@ def aggregate(self, pipeline, session=None, **kwargs): .. versionchanged:: 3.0 The :meth:`aggregate` method always returns a CommandCursor. The pipeline argument must be a list. - .. versionchanged:: 2.7 - When the cursor option is used, return - :class:`~pymongo.command_cursor.CommandCursor` instead of - :class:`~pymongo.cursor.Cursor`. - .. versionchanged:: 2.6 - Added cursor support. - .. versionadded:: 2.3 .. seealso:: :doc:`/examples/aggregation` .. _aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate(_CollectionAggregationCommand, - pipeline, - CommandCursor, - session=s, - explicit_session=session is not None, - **kwargs) - - def aggregate_raw_batches(self, pipeline, **kwargs): + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + explicit_session=session is not None, + let=let, + comment=comment, + **kwargs, + ) + + def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> RawBatchCursor[_DocumentType]: """Perform an aggregation and retrieve batches of raw BSON. 
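Before turning to the raw-batch variant, a sketch of the ``let`` and ``comment`` options on :meth:`aggregate` described above (``let`` requires MongoDB 5.0+; names are illustrative)::

    from pymongo import MongoClient

    db = MongoClient().test  # assumed handle

    pipeline = [
        # "$$threshold" resolves to the value supplied via `let`.
        {"$match": {"$expr": {"$gte": ["$qty", "$$threshold"]}}},
        {"$project": {"_id": 0, "item": 1, "qty": 1}},
    ]
    cursor = db.orders.aggregate(
        pipeline,
        let={"threshold": 5},      # constant, closed expression
        comment="restock-report",  # visible in server logs and the profiler
    )
    for doc in cursor:
        print(doc)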
Similar to the :meth:`aggregate` method but returns a @@ -2396,32 +2751,46 @@ def aggregate_raw_batches(self, pipeline, **kwargs): >>> for batch in cursor: ... print(bson.decode_all(batch)) - .. note:: aggregate_raw_batches does not support sessions or auto - encryption. + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. .. versionadded:: 3.6 """ - # OP_MSG with document stream returns is required to support - # sessions. - if "session" in kwargs: - raise ConfigurationError( - "aggregate_raw_batches does not support sessions") - # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "aggregate_raw_batches does not support auto encryption") - - return self._aggregate(_CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=None, - explicit_session=False, - **kwargs) - - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + with self.__database.client._tmp_session(session, close=False) as s: + return cast( + RawBatchCursor[_DocumentType], + self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + explicit_session=session is not None, + **kwargs, + ), + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. Performs an aggregation with an implicit initial ``$changeStream`` @@ -2429,8 +2798,6 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, :class:`~pymongo.change_stream.CollectionChangeStream` cursor which iterates over changes on this collection. - Introduced in MongoDB 3.6. - .. code-block:: python with db.collection.watch() as stream: @@ -2449,14 +2816,13 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. code-block:: python try: - with db.collection.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. @@ -2476,11 +2842,15 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. 
- When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -2501,10 +2871,22 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -2513,76 +2895,55 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. versionadded:: 3.6 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return CollectionChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def group(self, key, condition, initial, reduce, finalize=None, **kwargs): - """Perform a query similar to an SQL *group by* operation. - - **DEPRECATED** - The group command was deprecated in MongoDB 3.4. The - :meth:`~group` method is deprecated and will be removed in PyMongo 4.0. - Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce` - instead. - - .. versionchanged:: 3.5 - Deprecated the group method. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 2.2 - Removed deprecated argument: command - """ - warnings.warn("The group method is deprecated and will be removed in " - "PyMongo 4.0. 
Use the aggregate method with the $group " - "stage or the map_reduce method instead.", - DeprecationWarning, stacklevel=2) - group = {} - if isinstance(key, string_type): - group["$keyf"] = Code(key) - elif key is not None: - group = {"key": helpers._fields_list_to_dict(key, "key")} - group["ns"] = self.__name - group["$reduce"] = Code(reduce) - group["cond"] = condition - group["initial"] = initial - if finalize is not None: - group["finalize"] = Code(finalize) - - cmd = SON([("group", group)]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - - with self._socket_for_reads(session=None) as (sock_info, slave_ok): - return self._command(sock_info, cmd, slave_ok, - collation=collation, - user_fields={'retval': 1})["retval"] - - def rename(self, new_name, session=None, **kwargs): + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events, + ) + + @_csot.apply + def rename( + self, + new_name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: """Rename this collection. If operating in auth mode, client must be authorized as an admin to perform this operation. Raises :class:`TypeError` if - `new_name` is not an instance of :class:`basestring` - (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` if `new_name` is not a valid collection name. :Parameters: - `new_name`: new name for this collection - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional arguments to the rename command may be passed as keyword arguments to this helper method (i.e. ``dropTarget=True``) .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2592,36 +2953,47 @@ def rename(self, new_name, session=None, **kwargs): when connected to MongoDB >= 3.4. """ - if not isinstance(new_name, string_type): - raise TypeError("new_name must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(new_name, str): + raise TypeError("new_name must be an instance of str") if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") if new_name[0] == "." 
or new_name[-1] == ".": - raise InvalidName("collecion names must not start or end with '.'") + raise InvalidName("collection names must not start or end with '.'") if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") - new_name = "%s.%s" % (self.__database.name, new_name) + new_name = f"{self.__database.name}.{new_name}" cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment write_concern = self._write_concern_for_cmd(cmd, session) - with self._socket_for_writes(session) as sock_info: + with self._conn_for_writes(session) as conn: with self.__database.client._tmp_session(session) as s: - return sock_info.command( - 'admin', cmd, + return conn.command( + "admin", + cmd, write_concern=write_concern, parse_write_concern_error=True, - session=s, client=self.__database.client) - - def distinct(self, key, filter=None, session=None, **kwargs): + session=s, + client=self.__database.client, + ) + + def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list: """Get a list of distinct values for `key` among all documents in this collection. Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in python 3). + :class:`str`. All optional distinct parameters should be passed as keyword arguments to this method. Valid options include: @@ -2629,8 +3001,7 @@ def distinct(self, key, filter=None, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow the count command to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. The :meth:`distinct` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -2642,6 +3013,8 @@ def distinct(self, key, filter=None, session=None, **kwargs): from which to retrieve the distinct values. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. .. versionchanged:: 3.6 @@ -2651,247 +3024,138 @@ def distinct(self, key, filter=None, session=None, **kwargs): Support the `collation` option. 
""" - if not isinstance(key, string_type): - raise TypeError("key must be an " - "instance of %s" % (string_type.__name__,)) - cmd = SON([("distinct", self.__name), - ("key", key)]) + if not isinstance(key, str): + raise TypeError("key must be an instance of str") + cmd = SON([("distinct", self.__name), ("key", key)]) if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, slave_ok): - return self._command( - sock_info, cmd, slave_ok, read_concern=self.read_concern, - collation=collation, session=session, - user_fields={"values": 1})["values"] - - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) - - def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs): - """Internal mapReduce helper.""" - cmd = SON([("mapReduce", self.__name), - ("map", map), - ("reduce", reduce), - ("out", out)]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - - inline = 'inline' in out - - if inline: - user_fields = {'results': 1} - else: - user_fields = None - - read_pref = ((session and session._txn_read_preference()) - or read_pref) - - with self.__database.client._socket_for_reads(read_pref, session) as ( - sock_info, slave_ok): - if (sock_info.max_wire_version >= 4 and - ('readConcern' not in cmd) and - inline): - read_concern = self.read_concern - else: - read_concern = None - if 'writeConcern' not in cmd and not inline: - write_concern = self._write_concern_for(session) - else: - write_concern = None - + if comment is not None: + cmd["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> list: return self._command( - sock_info, cmd, slave_ok, read_pref, - read_concern=read_concern, - write_concern=write_concern, - collation=collation, session=session, - user_fields=user_fields) - - def map_reduce(self, map, reduce, out, full_response=False, session=None, - **kwargs): - """Perform a map/reduce operation on this collection. - - If `full_response` is ``False`` (default) returns a - :class:`~pymongo.collection.Collection` instance containing - the results of the operation. Otherwise, returns the full - response from the server to the `map reduce command`_. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `out`: output collection name or `out object` (dict). See - the `map reduce command`_ documentation for available options. - Note: `out` options are order sensitive. :class:`~bson.son.SON` - can be used to specify multiple options. - e.g. SON([('replace', ), ('db', )]) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.map_reduce(map, reduce, "myresults", limit=2) - - .. note:: The :meth:`map_reduce` method does **not** obey the - :attr:`read_preference` of this :class:`Collection`. 
To run - mapReduce on a secondary use the :meth:`inline_map_reduce` method - instead. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation (if the - output is not inline) when using MongoDB >= 3.4. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - .. seealso:: :doc:`/examples/aggregation` - - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 2.2 - Removed deprecated arguments: merge_output and reduce_output - - .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/ - - .. mongodoc:: mapreduce - - """ - if not isinstance(out, (string_type, abc.Mapping)): - raise TypeError("'out' must be an instance of " - "%s or a mapping" % (string_type.__name__,)) - - response = self._map_reduce(map, reduce, out, session, - ReadPreference.PRIMARY, **kwargs) - - if full_response or not response.get('result'): - return response - elif isinstance(response['result'], dict): - dbase = response['result']['db'] - coll = response['result']['collection'] - return self.__database.client[dbase][coll] - else: - return self.__database[response["result"]] - - def inline_map_reduce(self, map, reduce, full_response=False, session=None, - **kwargs): - """Perform an inline map/reduce operation on this collection. - - Perform the map/reduce operation on the server in RAM. A result - collection is not created. The result set is returned as a list - of documents. - - If `full_response` is ``False`` (default) returns the - result documents in a list. Otherwise, returns the full - response from the server to the `map reduce command`_. - - The :meth:`inline_map_reduce` method obeys the :attr:`read_preference` - of this :class:`Collection`. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.inline_map_reduce(map, reduce, limit=2) - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Added the `collation` option. 
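With :meth:`map_reduce` and :meth:`inline_map_reduce` deprecated, the usual replacement is an aggregation. A sketch, assuming the map/reduce pair emitted ``(this.key, this.value)`` and summed the values::

    from pymongo import MongoClient

    db = MongoClient().test  # assumed handle

    # Rough equivalent of an inline map/reduce that grouped by `key`
    # and summed `value`; results stay in memory and no output
    # collection is created.
    results = list(
        db.things.aggregate([{"$group": {"_id": "$key", "value": {"$sum": "$value"}}}])
    )
    print(results)  # e.g. [{'_id': 'a', 'value': 3}]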
- - """ - res = self._map_reduce(map, reduce, {"inline": 1}, session, - self.read_preference, **kwargs) + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + )["values"] - if full_response: - return res - else: - return res.get("results") + return self._retryable_non_cursor_read(_cmd, session) - def _write_concern_for_cmd(self, cmd, session): - raw_wc = cmd.get('writeConcern') + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[ClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") if raw_wc is not None: return WriteConcern(**raw_wc) else: return self._write_concern_for(session) - def __find_and_modify(self, filter, projection, sort, upsert=None, - return_document=ReturnDocument.BEFORE, - array_filters=None, session=None, **kwargs): + def __find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping] = None, + **kwargs: Any, + ) -> Any: """Internal findAndModify helper.""" - common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): - raise ValueError("return_document must be " - "ReturnDocument.BEFORE or ReturnDocument.AFTER") - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd = SON([("findAndModify", self.__name), - ("query", filter), - ("new", return_document)]) + raise ValueError( + "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let cmd.update(kwargs) if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, - "projection") + cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") if sort is not None: cmd["sort"] = helpers._index_document(sort) if upsert is not None: common.validate_boolean("upsert", upsert) cmd["upsert"] = upsert + if hint is not None: + if not isinstance(hint, str): + hint = helpers._index_document(hint) write_concern = self._write_concern_for_cmd(cmd, session) - def _find_and_modify(session, sock_info, retryable_write): + def _find_and_modify( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Any: + acknowledged = write_concern.acknowledged if array_filters is not None: - if sock_info.max_wire_version < 6: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." + ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use ' - 'arrayFilters.') - if not write_concern.acknowledged: + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." 
+ ) + elif not acknowledged and conn.max_wire_version < 9: raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged ' - 'writes.') - cmd["arrayFilters"] = array_filters - if (sock_info.max_wire_version >= 4 and - not write_concern.is_server_default): - cmd['writeConcern'] = write_concern.document - out = self._command(sock_info, cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=write_concern, - allowable_errors=[_NO_OBJ_ERROR], - collation=collation, session=session, - retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS) + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint + out = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) _check_write_command_response(out) return out.get("value") return self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, session) - - def find_one_and_delete(self, filter, - projection=None, sort=None, session=None, **kwargs): + write_concern.acknowledged, _find_and_modify, session + ) + + def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) 2 >>> db.test.find_one_and_delete({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} >>> db.test.count_documents({'x': 1}) 1 @@ -2900,17 +3164,17 @@ def find_one_and_delete(self, filter, >>> for doc in db.test.find({'x': 1}): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> db.test.find_one_and_delete( ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 2} The *projection* option can be used to limit the fields returned. >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) - {u'x': 1} + {'x': 1} :Parameters: - `filter`: A query that matches the document to delete. @@ -2922,15 +3186,29 @@ def find_one_and_delete(self, filter, - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is deleted. + - `hint` (optional): An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. 
- `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.2 Respects write concern. @@ -2943,16 +3221,28 @@ def find_one_and_delete(self, filter, .. versionchanged:: 3.4 Added the `collation` option. .. versionadded:: 3.0 - """ - kwargs['remove'] = True - return self.__find_and_modify(filter, projection, sort, - session=session, **kwargs) - - def find_one_and_replace(self, filter, replacement, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - session=None, **kwargs): + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return self.__find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2963,17 +3253,17 @@ def find_one_and_replace(self, filter, replacement, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) - {u'x': 1, u'_id': 0} + {'x': 1, '_id': 0} >>> for doc in db.test.find({}): ... print(doc) ... - {u'y': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} :Parameters: - `filter`: A query that matches the document to replace. @@ -2994,16 +3284,32 @@ def find_one_and_replace(self, filter, replacement, if no document matches. If :attr:`ReturnDocument.AFTER`, returns the replaced or inserted document. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 - Added the `collation` option. + Added the ``collation`` option. .. versionchanged:: 3.2 Respects write concern. 
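A sketch of the ``hint`` option threaded through the find-and-modify helpers above (documented for MongoDB 4.4+; the collection and index names are illustrative)::

    from pymongo import ASCENDING, MongoClient
    from pymongo.collection import ReturnDocument

    db = MongoClient().test  # assumed handle
    db.queue.create_index([("state", ASCENDING), ("_id", ASCENDING)], name="state_idx")

    # Atomically claim the oldest pending job, forcing the named index.
    job = db.queue.find_one_and_update(
        {"state": "pending"},
        {"$set": {"state": "running"}},
        sort=[("_id", ASCENDING)],
        hint="state_idx",  # string name or key-pattern list
        return_document=ReturnDocument.AFTER,
    )
    print(job)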
@@ -3016,21 +3322,42 @@ def find_one_and_replace(filter, replacement, .. versionadded:: 3.0 """ common.validate_ok_for_replace(replacement) - kwargs['update'] = replacement - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, - session=session, **kwargs) - - def find_one_and_update(self, filter, update, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - array_filters=None, session=None, **kwargs): + kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) + + def find_one_and_update( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and updates it, returning either the original or the updated document. >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) - {u'_id': 665, u'done': False, u'count': 25}} + {'_id': 665, 'done': False, 'count': 25} Returns ``None`` if no document matches the filter. @@ -3048,7 +3375,7 @@ def find_one_and_update(self, filter, update, ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) - {u'_id': u'userid', u'seq': 1} + {'_id': 'userid', 'seq': 1} You can limit the fields returned with the *projection* option. @@ -3057,7 +3384,7 @@ def find_one_and_update(self, filter, update, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) - {u'seq': 2} + {'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. @@ -3070,20 +3397,20 @@ def find_one_and_update(self, filter, update, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) - {u'seq': 1} + {'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... - {u'_id': 665, u'done': True, u'result': {u'count': 26}} - {u'_id': 701, u'done': True, u'result': {u'count': 17}} + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) - {u'_id': 701, u'done': True, u'result': {u'count': 17}} + {'_id': 701, 'done': True, 'result': {'count': 17}} :Parameters: - `filter`: A query that matches the document to update. @@ -3104,19 +3431,33 @@ def find_one_and_update(self, filter, update, :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply.
+ - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. + Added the ability to accept a pipeline as the ``update``. .. versionchanged:: 3.6 - Added the `array_filters` and `session` options. + Added the ``array_filters`` and ``session`` options. .. versionchanged:: 3.4 - Added the `collation` option. + Added the ``collation`` option. .. versionchanged:: 3.2 Respects write concern. @@ -3129,215 +3470,42 @@ def find_one_and_update(self, filter, update, .. versionadded:: 3.0 """ common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) - kwargs['update'] = update - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, - array_filters, session=session, **kwargs) - - def save(self, to_save, manipulate=True, check_keys=True, **kwargs): - """Save a document in this collection. - - **DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("save is deprecated. Use insert_one or replace_one " - "instead", DeprecationWarning, stacklevel=2) - common.validate_is_document_type("to_save", to_save) - - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - - if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save): - return self._insert( - to_save, True, check_keys, manipulate, write_concern) - else: - self._update_retryable( - {"_id": to_save["_id"]}, to_save, True, - check_keys, False, manipulate, write_concern, - collation=collation) - return to_save.get("_id") - - def insert(self, doc_or_docs, manipulate=True, - check_keys=True, continue_on_error=False, **kwargs): - """Insert a document(s) into this collection. - - **DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("insert is deprecated. Use insert_one or insert_many " - "instead.", DeprecationWarning, stacklevel=2) - write_concern = None - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._insert(doc_or_docs, not continue_on_error, - check_keys, manipulate, write_concern) - - def update(self, spec, document, upsert=False, manipulate=False, - multi=False, check_keys=True, **kwargs): - """Update a document(s) in this collection. - - **DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or - :meth:`update_many` instead. - - .. 
versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("update is deprecated. Use replace_one, update_one or " - "update_many instead.", DeprecationWarning, stacklevel=2) - common.validate_is_mapping("spec", spec) - common.validate_is_mapping("document", document) - if document: - # If a top level key begins with '$' this is a modify operation - # and we should skip key validation. It doesn't matter which key - # we check here. Passing a document with a mix of top level keys - # starting with and without a '$' is invalid and the server will - # raise an appropriate exception. - first = next(iter(document)) - if first.startswith('$'): - check_keys = False - - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._update_retryable( - spec, document, upsert, check_keys, multi, manipulate, - write_concern, collation=collation) - - def remove(self, spec_or_id=None, multi=True, **kwargs): - """Remove a document(s) from this collection. - - **DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("remove is deprecated. Use delete_one or delete_many " - "instead.", DeprecationWarning, stacklevel=2) - if spec_or_id is None: - spec_or_id = {} - if not isinstance(spec_or_id, abc.Mapping): - spec_or_id = {"_id": spec_or_id} - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._delete_retryable( - spec_or_id, multi, write_concern, collation=collation) - - def find_and_modify(self, query={}, update=None, - upsert=False, sort=None, full_response=False, - manipulate=False, **kwargs): - """Update and return an object. - - **DEPRECATED** - Use :meth:`find_one_and_delete`, - :meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead. - """ - warnings.warn("find_and_modify is deprecated, use find_one_and_delete" - ", find_one_and_replace, or find_one_and_update instead", - DeprecationWarning, stacklevel=2) - - if not update and not kwargs.get('remove', None): - raise ValueError("Must either update or remove") - - if update and kwargs.get('remove', None): - raise ValueError("Can't do both update and remove") - - # No need to include empty args - if query: - kwargs['query'] = query - if update: - kwargs['update'] = update - if upsert: - kwargs['upsert'] = upsert - if sort: - # Accept a list of tuples to match Cursor's sort parameter. - if isinstance(sort, list): - kwargs['sort'] = helpers._index_document(sort) - # Accept OrderedDict, SON, and dict with len == 1 so we - # don't break existing code already using find_and_modify. 
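The deprecated helpers being removed here each name their replacements in their own docstrings; as a quick sketch of the mapping (``coll`` is any acknowledged Collection, and the filters and documents are illustrative):

    coll.insert_one(doc); coll.insert_many(docs)      # replaces insert()
    coll.replace_one({'_id': 1}, doc, upsert=True)    # replaces save() for docs with _id
    coll.update_one(spec, {'$set': {'y': 2}})         # replaces update(spec, ...)
    coll.update_many(spec, {'$set': {'y': 2}})        # replaces update(spec, ..., multi=True)
    coll.delete_one(spec); coll.delete_many(spec)     # replaces remove()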
- elif (isinstance(sort, ORDERED_TYPES) or - isinstance(sort, dict) and len(sort) == 1): - warnings.warn("Passing mapping types for `sort` is deprecated," - " use a list of (key, direction) pairs instead", - DeprecationWarning, stacklevel=2) - kwargs['sort'] = sort - else: - raise TypeError("sort must be a list of (key, direction) " - "pairs, a dict of len 1, or an instance of " - "SON or OrderedDict") - - fields = kwargs.pop("fields", None) - if fields is not None: - kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields") - - collation = validate_collation_or_none(kwargs.pop('collation', None)) - - cmd = SON([("findAndModify", self.__name)]) - cmd.update(kwargs) - - write_concern = self._write_concern_for_cmd(cmd, None) - - def _find_and_modify(session, sock_info, retryable_write): - if (sock_info.max_wire_version >= 4 and - not write_concern.is_server_default): - cmd['writeConcern'] = write_concern.document - result = self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - allowable_errors=[_NO_OBJ_ERROR], collation=collation, - session=session, retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS) - - _check_write_command_response(result) - return result - - out = self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, None) - - if not out['ok']: - if out["errmsg"] == _NO_OBJ_ERROR: - return None - else: - # Should never get here b/c of allowable_errors - raise ValueError("Unexpected Error: %s" % (out,)) - - if full_response: - return out - else: - document = out.get('value') - if manipulate: - document = self.__database._fix_outgoing(document, self) - return document + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) - def __iter__(self): - return self + # See PYTHON-3084. + __iter__ = None - def __next__(self): + def __next__(self) -> NoReturn: raise TypeError("'Collection' object is not iterable") next = __next__ - def __call__(self, *args, **kwargs): - """This is only here so that some API misusages are easier to debug. - """ + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" if "." not in self.__name: - raise TypeError("'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % - self.__name) - raise TypeError("'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % - self.__name.split(".")[-1]) + raise TypeError( + "'Collection' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self.__name + ) + raise TypeError( + "'Collection' object is not callable. If you meant to " + "call the '%s' method on a 'Collection' object it is " + "failing because no such method exists." % self.__name.split(".")[-1] + ) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 196d94dda5..42becece28 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -13,86 +13,115 @@ # limitations under the License. 
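A note on the reworked ``Collection.__call__`` at the end of collection.py above: it exists only to turn a common attribute-access mistake into a readable error. A hypothetical typo, ``fnd_one``, triggers the second branch (sketch, not part of the patch):

>>> db.test.fnd_one({'x': 1})
Traceback (most recent call last):
  ...
TypeError: 'Collection' object is not callable. If you meant to call the 'fnd_one' method on a 'Collection' object it is failing because no such method exists.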
"""CommandCursor class to iterate over command results.""" +from __future__ import annotations from collections import deque - -from bson.py3compat import integer_types -from pymongo.errors import (ConnectionFailure, - InvalidOperation, - NotMasterError, - OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore) - - -class CommandCursor(object): +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterator, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _ConnectionManager +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import _CursorAddress, _GetMore, _OpMsg, _OpReply, _RawBatchGetMore +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.pool import Connection + + +class CommandCursor(Generic[_DocumentType]): """A cursor / iterator over command cursors.""" - _getmore_class = _GetMore - def __init__(self, collection, cursor_info, address, retrieved=0, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): - """Create a new command cursor. + _getmore_class = _GetMore - The parameter 'retrieved' is unused. - """ - self.__collection = collection - self.__id = cursor_info['id'] - self.__data = deque(cursor_info['firstBatch']) - self.__postbatchresumetoken = cursor_info.get('postBatchResumeToken') + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + explicit_session: bool = False, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self.__sock_mgr: Any = None + self.__collection: Collection[_DocumentType] = collection + self.__id = cursor_info["id"] + self.__data = deque(cursor_info["firstBatch"]) + self.__postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) self.__address = address self.__batch_size = batch_size self.__max_await_time_ms = max_await_time_ms self.__session = session self.__explicit_session = explicit_session - self.__killed = (self.__id == 0) + self.__killed = self.__id == 0 + self.__comment = comment if self.__killed: self.__end_session(True) - if "ns" in cursor_info: + if "ns" in cursor_info: # noqa: SIM401 self.__ns = cursor_info["ns"] else: self.__ns = collection.full_name self.batch_size(batch_size) - if (not isinstance(max_await_time_ms, integer_types) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") - def __del__(self): - if self.__id and not self.__killed: - self.__die() + def __del__(self) -> None: + self.__die() - def __die(self, synchronous=False): - """Closes this cursor. 
- """ + def __die(self, synchronous: bool = False) -> None: + """Closes this cursor.""" already_killed = self.__killed self.__killed = True if self.__id and not already_killed: - address = _CursorAddress( - self.__address, self.__collection.full_name) - if synchronous: - self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session) - else: - # The cursor will be closed later in a different session. - self.__collection.database.client._close_cursor( - self.__id, address) - self.__end_session(synchronous) + cursor_id = self.__id + assert self.__address is not None + address = _CursorAddress(self.__address, self.__ns) + else: + # Skip killCursors. + cursor_id = 0 + address = None + self.__collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self.__sock_mgr, + self.__session, + self.__explicit_session, + ) + if not self.__explicit_session: + self.__session = None + self.__sock_mgr = None - def __end_session(self, synchronous): + def __end_session(self, synchronous: bool) -> None: if self.__session and not self.__explicit_session: self.__session._end_session(lock=synchronous) self.__session = None - def close(self): - """Explicitly close / kill this cursor. - """ + def close(self) -> None: + """Explicitly close / kill this cursor.""" self.__die(True) - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. @@ -108,7 +137,7 @@ def batch_size(self, batch_size): :Parameters: - `batch_size`: The size of each batch of results requested. """ - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") @@ -116,71 +145,88 @@ def batch_size(self, batch_size): self.__batch_size = batch_size == 1 and 2 or batch_size return self - def _has_next(self): + def _has_next(self) -> bool: """Returns `True` if the cursor has documents remaining from the - previous batch.""" + previous batch. + """ return len(self.__data) > 0 @property - def _post_batch_resume_token(self): + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: """Retrieve the postBatchResumeToken from the response to a - changeStream aggregate or getMore.""" + changeStream aggregate or getMore. + """ return self.__postbatchresumetoken - def __send_message(self, operation): - """Send a getmore message and handle the response. - """ - def kill(): - self.__killed = True - self.__end_session(True) + def _maybe_pin_connection(self, conn: Connection) -> None: + client = self.__collection.database.client + if not client._should_pin_cursor(self.__session): + return + if not self.__sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. 
+ if self.__id == 0: + conn_mgr.close() + else: + self.__sock_mgr = conn_mgr + def __send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" client = self.__collection.database.client try: - response = client._run_operation_with_response( - operation, self._unpack_response, address=self.__address) - except OperationFailure: - kill() - raise - except NotMasterError: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - kill() + response = client._run_operation( + operation, self._unpack_response, address=self.__address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self.__killed = True + if exc.timeout: + self.__die(False) + else: + # Return the session and pinned connection, if necessary. + self.close() raise except ConnectionFailure: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. - kill() + # Don't send killCursors because the cursor is already closed. + self.__killed = True + # Return the session and pinned connection, if necessary. + self.close() raise except Exception: - # Close the cursor - self.__die() + self.close() raise - from_command = response.from_command - reply = response.data - docs = response.docs - - if from_command: - cursor = docs[0]['cursor'] - documents = cursor['nextBatch'] - self.__postbatchresumetoken = cursor.get('postBatchResumeToken') - self.__id = cursor['id'] + if isinstance(response, PinnedResponse): + if not self.__sock_mgr: + self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self.__postbatchresumetoken = cursor.get("postBatchResumeToken") + self.__id = cursor["id"] else: - documents = docs - self.__id = reply.cursor_id + documents = response.docs + assert isinstance(response.data, _OpReply) + self.__id = response.data.cursor_id if self.__id == 0: - kill() + self.close() self.__data = deque(documents) - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) - - def _refresh(self): + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _refresh(self) -> int: """Refreshes the cursor with more data from the server. Returns the length of self.__data after refresh. 
Will exit early if @@ -191,27 +237,31 @@ def _refresh(self): return len(self.__data) if self.__id: # Get More - dbname, collname = self.__ns.split('.', 1) + dbname, collname = self.__ns.split(".", 1) read_pref = self.__collection._read_preference_for(self.session) self.__send_message( - self._getmore_class(dbname, - collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, - read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - False)) + self._getmore_class( + dbname, + collname, + self.__batch_size, + self.__id, + self.__collection.codec_options, + read_pref, + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + False, + self.__comment, + ) + ) else: # Cursor id is zero nothing else to return - self.__killed = True - self.__end_session(True) + self.__die(True) return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -228,12 +278,12 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> int: """Returns the id of the cursor.""" return self.__id @property - def address(self): + def address(self) -> Optional[_Address]: """The (host, port) of the server used, or None. .. versionadded:: 3.0 @@ -241,18 +291,19 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> Iterator[_DocumentType]: return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" # Block until a document is returnable. while self.alive: @@ -264,45 +315,89 @@ def next(self): __next__ = next - def _try_next(self, get_more_allowed): + def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: """Advance the cursor blocking for at most one getMore command.""" if not len(self.__data) and not self.__killed and get_more_allowed: self._refresh() if len(self.__data): - coll = self.__collection - return coll.database._fix_outgoing(self.__data.popleft(), coll) + return self.__data.popleft() else: return None - def __enter__(self): + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there is no additional data) then ``None`` is returned. + + :Returns: + The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. 
versionadded:: 4.5 + """ + return self._try_next(get_more_allowed=True) + + def __enter__(self) -> CommandCursor[_DocumentType]: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class RawBatchCommandCursor(CommandCursor): +class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): _getmore_class = _RawBatchGetMore - def __init__(self, collection, cursor_info, address, retrieved=0, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + explicit_session: bool = False, + comment: Any = None, + ) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - see :meth:`~pymongo.collection.Collection.aggregate_raw_batches` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ - assert not cursor_info.get('firstBatch') - super(RawBatchCommandCursor, self).__init__( - collection, cursor_info, address, retrieved, batch_size, - max_await_time_ms, session, explicit_session) - - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.raw_response(cursor_id) - - def __getitem__(self, index): + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + explicit_session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/common.py b/pymongo/common.py index f208e83d08..e3da3a5f69 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -14,52 +14,67 @@ """Functions and classes common to multiple pymongo modules.""" +from __future__ import annotations import datetime +import inspect import warnings +from collections import OrderedDict, abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + Union, + overload, +) +from urllib.parse import unquote_plus from bson import SON -from bson.binary import (STANDARD, PYTHON_LEGACY, - JAVA_LEGACY, CSHARP_LEGACY) -from bson.codec_options import CodecOptions, TypeRegistry -from bson.py3compat import abc, integer_types, iteritems, string_type +from bson.binary import UuidRepresentation +from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS -from pymongo.compression_support import (validate_compressors, - validate_zlib_compression_level) +from 
pymongo.compression_support import ( + validate_compressors, + validate_zlib_compression_level, +) from pymongo.driver_info import DriverInfo -from pymongo.encryption_options import validate_auto_encryption_opts_or_none from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode -from pymongo.ssl_support import (validate_cert_reqs, - validate_allow_invalid_certs) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern +from pymongo.server_api import ServerApi +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean -try: - from collections import OrderedDict - ORDERED_TYPES = (SON, OrderedDict) -except ImportError: - ORDERED_TYPES = (SON,) +if TYPE_CHECKING: + from pymongo.client_session import ClientSession +ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. -MAX_BSON_SIZE = 16 * (1024 ** 2) -MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE +MAX_BSON_SIZE = 16 * (1024**2) +MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 MAX_WRITE_BATCH_SIZE = 1000 # What this version of PyMongo supports. -MIN_SUPPORTED_SERVER_VERSION = "2.6" -MIN_SUPPORTED_WIRE_VERSION = 2 -MAX_SUPPORTED_WIRE_VERSION = 8 +MIN_SUPPORTED_SERVER_VERSION = "3.6" +MIN_SUPPORTED_WIRE_VERSION = 6 +MAX_SUPPORTED_WIRE_VERSION = 21 -# Frequency to call ismaster on servers, in seconds. +# Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 -# Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor. +# Frequency to clean up unclosed cursors, in seconds. +# See MongoClient._process_kill_cursors. KILL_CURSOR_FREQUENCY = 1 # Frequency to process events queue, in seconds. @@ -71,7 +86,7 @@ # longest it is willing to wait for a new primary to be found. SERVER_SELECTION_TIMEOUT = 30 -# Spec requires at least 500ms between ismaster calls. +# Spec requires at least 500ms between hello calls. MIN_HEARTBEAT_INTERVAL = 0.5 # Spec requires at least 60s between SRV rescans. @@ -86,14 +101,17 @@ # Default value for minPoolSize. MIN_POOL_SIZE = 0 +# The maximum number of concurrent connection creation attempts per pool. +MAX_CONNECTING = 2 + # Default value for maxIdleTimeMS. -MAX_IDLE_TIME_MS = None +MAX_IDLE_TIME_MS: Optional[int] = None # Default value for maxIdleTimeMS in seconds. -MAX_IDLE_TIME_SEC = None +MAX_IDLE_TIME_SEC: Optional[int] = None # Default value for waitQueueTimeoutMS in seconds. -WAIT_QUEUE_TIMEOUT = None +WAIT_QUEUE_TIMEOUT: Optional[int] = None # Default value for localThresholdMS. LOCAL_THRESHOLD_MS = 15 @@ -104,201 +122,182 @@ # Default value for retryReads. RETRY_READS = True -# mongod/s 2.6 and above return code 59 when a command doesn't exist. -COMMAND_NOT_FOUND_CODES = (59,) +# The error code returned when a command doesn't exist. +COMMAND_NOT_FOUND_CODES: Sequence[int] = (59,) # Error codes to ignore if GridFS calls createIndex on a secondary -UNAUTHORIZED_CODES = (13, 16547, 16548) +UNAUTHORIZED_CODES: Sequence[int] = (13, 16547, 16548) # Maximum number of sessions to send in a single endSessions command. # From the driver sessions spec. 
_MAX_END_SESSIONS = 10000 +# Default value for srvServiceName +SRV_SERVICE_NAME = "mongodb" + +# Default value for serverMonitoringMode +SERVER_MONITORING_MODE = "auto" # poll/stream/auto + -def partition_node(node): +def partition_node(node: str) -> tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 - idx = node.rfind(':') + idx = node.rfind(":") if idx != -1: - host, port = node[:idx], int(node[idx + 1:]) - if host.startswith('['): + host, port = node[:idx], int(node[idx + 1 :]) + if host.startswith("["): host = host[1:-1] return host, port -def clean_node(node): - """Split and normalize a node name from an ismaster response.""" +def clean_node(node: str) -> tuple[str, int]: + """Split and normalize a node name from a hello response.""" host, port = partition_node(node) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the ismaster response. + # "FOO.com" is in the hello response. return host.lower(), port -def raise_config_error(key, dummy): +def raise_config_error(key: str, dummy: Any) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError("Unknown option %s" % (key,)) + raise ConfigurationError(f"Unknown option {key}") # Mapping of URI uuid representation options to valid subtypes. _UUID_REPRESENTATIONS = { - 'standard': STANDARD, - 'pythonLegacy': PYTHON_LEGACY, - 'javaLegacy': JAVA_LEGACY, - 'csharpLegacy': CSHARP_LEGACY + "unspecified": UuidRepresentation.UNSPECIFIED, + "standard": UuidRepresentation.STANDARD, + "pythonLegacy": UuidRepresentation.PYTHON_LEGACY, + "javaLegacy": UuidRepresentation.JAVA_LEGACY, + "csharpLegacy": UuidRepresentation.CSHARP_LEGACY, } -def validate_boolean(option, value): - """Validates that 'value' is True or False.""" - if isinstance(value, bool): - return value - raise TypeError("%s must be True or False" % (option,)) - - -def validate_boolean_or_string(option, value): +def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" - if isinstance(value, string_type): - if value not in ('true', 'false'): - raise ValueError("The value of %s must be " - "'true' or 'false'" % (option,)) - return value == 'true' + if isinstance(value, str): + if value not in ("true", "false"): + raise ValueError(f"The value of {option} must be 'true' or 'false'") + return value == "true" return validate_boolean(option, value) -def validate_integer(option, value): - """Validates that 'value' is an integer (or basestring representation). - """ - if isinstance(value, integer_types): +def validate_integer(option: str, value: Any) -> int: + """Validates that 'value' is an integer (or string representation).""" + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: return int(value) except ValueError: - raise ValueError("The value of %s must be " - "an integer" % (option,)) - raise TypeError("Wrong type for %s, value must be an integer" % (option,)) + raise ValueError(f"The value of {option} must be an integer") from None + raise TypeError(f"Wrong type for {option}, value must be an integer") -def validate_positive_integer(option, value): - """Validate that 'value' is a positive integer, which does not include 0.
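A quick illustration of the node-parsing helpers above; the expected results follow directly from the code, and the host names are illustrative:

>>> partition_node('example.com:27018')
('example.com', 27018)
>>> partition_node('[::1]:27019')       # IPv6 brackets are stripped
('::1', 27019)
>>> clean_node('ShardA.Example.com')    # hostnames normalize to lowercase
('sharda.example.com', 27017)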
- """ +def validate_positive_integer(option: str, value: Any) -> int: + """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be " - "a positive integer" % (option,)) + raise ValueError(f"The value of {option} must be a positive integer") return val -def validate_non_negative_integer(option, value): - """Validate that 'value' is a positive integer or 0. - """ +def validate_non_negative_integer(option: str, value: Any) -> int: + """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be " - "a non negative integer" % (option,)) + raise ValueError(f"The value of {option} must be a non negative integer") return val -def validate_readable(option, value): - """Validates that 'value' is file-like and readable. - """ +def validate_readable(option: str, value: Any) -> Optional[str]: + """Validates that 'value' is file-like and readable.""" if value is None: return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, 'r').close() + open(value).close() return value -def validate_positive_integer_or_none(option, value): - """Validate that 'value' is a positive integer or None. - """ +def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: + """Validate that 'value' is a positive integer or None.""" if value is None: return value return validate_positive_integer(option, value) -def validate_non_negative_integer_or_none(option, value): - """Validate that 'value' is a positive integer or 0 or None. - """ +def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: + """Validate that 'value' is a positive integer or 0 or None.""" if value is None: return value return validate_non_negative_integer(option, value) -def validate_string(option, value): - """Validates that 'value' is an instance of `basestring` for Python 2 - or `str` for Python 3. - """ - if isinstance(value, string_type): +def validate_string(option: str, value: Any) -> str: + """Validates that 'value' is an instance of `str`.""" + if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be " - "an instance of %s" % (option, string_type.__name__)) + raise TypeError(f"Wrong type for {option}, value must be an instance of str") -def validate_string_or_none(option, value): - """Validates that 'value' is an instance of `basestring` or `None`. - """ +def validate_string_or_none(option: str, value: Any) -> Optional[str]: + """Validates that 'value' is an instance of `basestring` or `None`.""" if value is None: return value return validate_string(option, value) -def validate_int_or_basestring(option, value): - """Validates that 'value' is an integer or string. 
- """ - if isinstance(value, integer_types): +def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: + """Validates that 'value' is an integer or string.""" + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an " - "integer or a string" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an integer or a string") -def validate_non_negative_int_or_basestring(option, value): - """Validates that 'value' is an integer or string. - """ - if isinstance(value, integer_types): +def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: + """Validates that 'value' is an integer or string.""" + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: val = int(value) except ValueError: return value return validate_non_negative_integer(option, val) - raise TypeError("Wrong type for %s, value must be an " - "non negative integer or a string" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an non negative integer or a string") -def validate_positive_float(option, value): +def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is - positive. + positive. """ - errmsg = "%s must be an integer or float" % (option,) + errmsg = f"{option} must be an integer or float" try: value = float(value) except ValueError: - raise ValueError(errmsg) + raise ValueError(errmsg) from None except TypeError: - raise TypeError(errmsg) + raise TypeError(errmsg) from None # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and " - "less than one billion" % (option,)) + raise ValueError(f"{option} must be greater than 0 and less than one billion") return value -def validate_positive_float_or_zero(option, value): +def validate_positive_float_or_zero(option: str, value: Any) -> float: """Validates that 'value' is 0 or a positive float, or can be converted to 0 or a positive float. """ @@ -307,7 +306,7 @@ def validate_positive_float_or_zero(option, value): return validate_positive_float(option, value) -def validate_timeout_or_none(option, value): +def validate_timeout_or_none(option: str, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. """ @@ -316,20 +315,39 @@ def validate_timeout_or_none(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_timeout_or_zero(option, value): +def validate_timeout_or_zero(option: str, value: Any) -> float: """Validates a timeout specified in milliseconds returning a value in floating point seconds for the case where None is an error and 0 is valid. Setting the timeout to nothing in the URI string is a config error. 
""" if value is None: - raise ConfigurationError("%s cannot be None" % (option, )) + raise ConfigurationError(f"{option} cannot be None") if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 -def validate_max_staleness(option, value): +def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float]: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. value=0 and value="0" are treated the + same as value=None which means unlimited timeout. + """ + if value is None or value == 0 or value == "0": + return None + return validate_positive_float(option, value) / 1000.0 + + +def validate_timeoutms(option: Any, value: Any) -> Optional[float]: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. + """ + if value is None: + return None + return validate_positive_float_or_zero(option, value) / 1000.0 + + +def validate_max_staleness(option: str, value: Any) -> int: """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": # Default: No maximum staleness. @@ -337,396 +355,476 @@ def validate_max_staleness(option, value): return validate_positive_integer(option, value) -def validate_read_preference(dummy, value): - """Validate a read preference. - """ +def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: + """Validate a read preference.""" if not isinstance(value, _ServerMode): - raise TypeError("%r is not a read preference." % (value,)) + raise TypeError(f"{value!r} is not a read preference.") return value -def validate_read_preference_mode(dummy, value): - """Validate read preference mode for a MongoReplicaSetClient. +def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: + """Validate read preference mode for a MongoClient. .. versionchanged:: 3.5 Returns the original ``value`` instead of the validated read preference mode. """ if value not in _MONGOS_MODES: - raise ValueError("%s is not a valid read preference" % (value,)) + raise ValueError(f"{value} is not a valid read preference") return value -def validate_auth_mechanism(option, value): - """Validate the authMechanism URI option. - """ - # CRAM-MD5 is for server testing only. Undocumented, - # unsupported, may be removed at any time. You have - # been warned. - if value not in MECHANISMS and value != 'CRAM-MD5': - raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) +def validate_auth_mechanism(option: str, value: Any) -> str: + """Validate the authMechanism URI option.""" + if value not in MECHANISMS: + raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") return value -def validate_uuid_representation(dummy, value): - """Validate the uuid representation option selected in the URI. - """ +def validate_uuid_representation(dummy: Any, value: Any) -> int: + """Validate the uuid representation option selected in the URI.""" try: return _UUID_REPRESENTATIONS[value] except KeyError: - raise ValueError("%s is an invalid UUID representation. " - "Must be one of " - "%s" % (value, tuple(_UUID_REPRESENTATIONS))) + raise ValueError( + f"{value} is an invalid UUID representation. " + "Must be one of " + f"{tuple(_UUID_REPRESENTATIONS)}" + ) from None -def validate_read_preference_tags(name, value): - """Parse readPreferenceTags if passed as a client kwarg. 
- """ +def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]]: + """Parse readPreferenceTags if passed as a client kwarg.""" if not isinstance(value, list): value = [value] - tag_sets = [] + tag_sets: list = [] for tag_set in value: - if tag_set == '': + if tag_set == "": tag_sets.append({}) continue try: - tag_sets.append(dict([tag.split(":") - for tag in tag_set.split(",")])) + tags = {} + for tag in tag_set.split(","): + key, val = tag.split(":") + tags[unquote_plus(key)] = unquote_plus(val) + tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid " - "value for %s" % (tag_set, name)) + raise ValueError(f"{tag_set!r} not a valid value for {name}") from None return tag_sets -_MECHANISM_PROPS = frozenset(['SERVICE_NAME', - 'CANONICALIZE_HOST_NAME', - 'SERVICE_REALM']) +_MECHANISM_PROPS = frozenset( + [ + "SERVICE_NAME", + "CANONICALIZE_HOST_NAME", + "SERVICE_REALM", + "AWS_SESSION_TOKEN", + "PROVIDER_NAME", + ] +) -def validate_auth_mechanism_properties(option, value): +def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" + props: dict[str, Any] = {} + if not isinstance(value, str): + if not isinstance(value, dict): + raise ValueError("Auth mechanism properties must be given as a string or a dictionary") + for key, value in value.items(): # noqa: B020 + if isinstance(value, str): + props[key] = value + elif isinstance(value, bool): + props[key] = str(value).lower() + elif key in ["allowed_hosts"] and isinstance(value, list): + props[key] = value + elif inspect.isfunction(value): + signature = inspect.signature(value) + if key == "request_token_callback": + expected_params = 2 + else: + raise ValueError(f"Unrecognized Auth mechanism function {key}") + if len(signature.parameters) != expected_params: + msg = f"{key} must accept {expected_params} parameters" + raise ValueError(msg) + props[key] = value + else: + raise ValueError( + "Auth mechanism property values must be strings or callback functions" + ) + return props + value = validate_string(option, value) - props = {} - for opt in value.split(','): + for opt in value.split(","): try: - key, val = opt.split(':') + key, val = opt.split(":") except ValueError: - raise ValueError("auth mechanism properties must be " - "key:value pairs like SERVICE_NAME:" - "mongodb, not %s." % (opt,)) + # Try not to leak the token. + if "AWS_SESSION_TOKEN" in opt: + opt = ( # noqa: PLW2901 + "AWS_SESSION_TOKEN:, did you forget " + "to percent-escape the token with quote_plus?" + ) + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like SERVICE_NAME:" + f"mongodb, not {opt}." + ) from None if key not in _MECHANISM_PROPS: - raise ValueError("%s is not a supported auth " - "mechanism property. Must be one of " - "%s." % (key, tuple(_MECHANISM_PROPS))) - if key == 'CANONICALIZE_HOST_NAME': + raise ValueError( + f"{key} is not a supported auth " + "mechanism property. Must be one of " + f"{tuple(_MECHANISM_PROPS)}." 
+ ) + if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) else: - props[key] = val + props[key] = unquote_plus(val) return props -def validate_document_class(option, value): +def validate_document_class( + option: str, value: Any +) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" - if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,)) + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(value, abc.MutableMapping) + except TypeError: + if hasattr(value, "__origin__"): + is_mapping = issubclass(value.__origin__, abc.MutableMapping) + if not is_mapping and not issubclass(value, RawBSONDocument): + raise TypeError( + f"{option} must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.MutableMapping" + ) return value -def validate_type_registry(option, value): +def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % ( - option, TypeRegistry)) + raise TypeError(f"{option} must be an instance of {TypeRegistry}") return value -def validate_list(option, value): +def validate_list(option: str, value: Any) -> list: """Validates that 'value' is a list.""" if not isinstance(value, list): - raise TypeError("%s must be a list" % (option,)) + raise TypeError(f"{option} must be a list") return value -def validate_list_or_none(option, value): +def validate_list_or_none(option: Any, value: Any) -> Optional[list]: """Validates that 'value' is a list or None.""" if value is None: return value return validate_list(option, value) -def validate_list_or_mapping(option, value): +def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): - raise TypeError("%s must either be a list or an instance of dict, " - "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + f"{option} must either be a list or an instance of dict, " + "bson.son.SON, or any other type that inherits from " + "collections.Mapping" + ) -def validate_is_mapping(option, value): +def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): - raise TypeError("%s must be an instance of dict, bson.son.SON, or " - "any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + f"{option} must be an instance of dict, bson.son.SON, or " + "any other type that inherits from " + "collections.Mapping" + ) -def validate_is_document_type(option, value): +def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be an instance of dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or " - "a type that inherits from " - "collections.MutableMapping" % (option,)) + raise TypeError( + f"{option} must be an instance 
of dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or " + "a type that inherits from " + "collections.MutableMapping" + ) -def validate_appname_or_none(option, value): +def validate_appname_or_none(option: str, value: Any) -> Optional[str]: """Validate the appname option.""" if value is None: return value validate_string(option, value) # We need length in bytes, so encode utf8 first. - if len(value.encode('utf-8')) > 128: - raise ValueError("%s must be <= 128 bytes" % (option,)) + if len(value.encode("utf-8")) > 128: + raise ValueError(f"{option} must be <= 128 bytes") return value -def validate_driver_or_none(option, value): +def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: """Validate the driver keyword arg.""" if value is None: return value if not isinstance(value, DriverInfo): - raise TypeError("%s must be an instance of DriverInfo" % (option,)) + raise TypeError(f"{option} must be an instance of DriverInfo") return value -def validate_is_callable_or_none(option, value): +def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: + """Validate the server_api keyword arg.""" + if value is None: + return value + if not isinstance(value, ServerApi): + raise TypeError(f"{option} must be an instance of ServerApi") + return value + + +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: """Validates that 'value' is a callable.""" if value is None: return value if not callable(value): - raise ValueError("%s must be a callable" % (option,)) + raise ValueError(f"{option} must be a callable") return value -def validate_ok_for_replace(replacement): +def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: """Validate a replacement document.""" validate_is_mapping("replacement", replacement) # Replacement can be {} if replacement and not isinstance(replacement, RawBSONDocument): first = next(iter(replacement)) - if first.startswith('$'): - raise ValueError('replacement can not include $ operators') + if first.startswith("$"): + raise ValueError("replacement can not include $ operators") -def validate_ok_for_update(update): +def validate_ok_for_update(update: Any) -> None: """Validate an update document.""" validate_list_or_mapping("update", update) # Update cannot be {}. if not update: - raise ValueError('update cannot be empty') + raise ValueError("update cannot be empty") is_document = not isinstance(update, list) first = next(iter(update)) - if is_document and not first.startswith('$'): - raise ValueError('update only works with $ operators') + if is_document and not first.startswith("$"): + raise ValueError("update only works with $ operators") -_UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore']) +_UNICODE_DECODE_ERROR_HANDLERS = frozenset(["strict", "replace", "ignore"]) -def validate_unicode_decode_error_handler(dummy, value): - """Validate the Unicode decode error handler option of CodecOptions. - """ +def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: + """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: - raise ValueError("%s is an invalid Unicode decode error handler. " - "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS))) + raise ValueError( + f"{value} is an invalid Unicode decode error handler. 
" + "Must be one of " + f"{tuple(_UNICODE_DECODE_ERROR_HANDLERS)}" + ) return value -def validate_tzinfo(dummy, value): - """Validate the tzinfo option - """ +def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: + """Validate the tzinfo option""" if value is not None and not isinstance(value, datetime.tzinfo): raise TypeError("%s must be an instance of datetime.tzinfo" % value) return value +def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[Any]: + """Validate the driver keyword arg.""" + if value is None: + return value + from pymongo.encryption_options import AutoEncryptionOpts + + if not isinstance(value, AutoEncryptionOpts): + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts") + + return value + + +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversion]: + """Validate a DatetimeConversion string.""" + if value is None: + return DatetimeConversion.DATETIME + + if isinstance(value, str): + if value.isdigit(): + return DatetimeConversion(int(value)) + return DatetimeConversion[value] + elif isinstance(value, int): + return DatetimeConversion(value) + + raise TypeError(f"{option} must be a str or int representing DatetimeConversion") + + +def validate_server_monitoring_mode(option: str, value: str) -> str: + """Validate the serverMonitoringMode option.""" + if value not in {"auto", "stream", "poll"}: + raise ValueError( + f'{option}={value!r} is invalid. Must be one of "auto", "stream", or "poll"' + ) + return value + + # Dictionary where keys are the names of public URI options, and values -# are lists of aliases for that option. Aliases of option names are assumed -# to have been deprecated. -URI_OPTIONS_ALIAS_MAP = { - 'journal': ['j'], - 'wtimeoutms': ['wtimeout'], - 'tls': ['ssl'], - 'tlsallowinvalidcertificates': ['ssl_cert_reqs'], - 'tlsallowinvalidhostnames': ['ssl_match_hostname'], - 'tlscrlfile': ['ssl_crlfile'], - 'tlscafile': ['ssl_ca_certs'], - 'tlscertificatekeyfile': ['ssl_certfile'], - 'tlscertificatekeyfilepassword': ['ssl_pem_passphrase'], +# are lists of aliases for that option. +URI_OPTIONS_ALIAS_MAP: dict[str, list[str]] = { + "tls": ["ssl"], } # Dictionary where keys are the names of URI options, and values # are functions that validate user-input values for that option. If an option # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. 
-URI_OPTIONS_VALIDATOR_MAP = { - 'appname': validate_appname_or_none, - 'authmechanism': validate_auth_mechanism, - 'authmechanismproperties': validate_auth_mechanism_properties, - 'authsource': validate_string, - 'compressors': validate_compressors, - 'connecttimeoutms': validate_timeout_or_none, - 'heartbeatfrequencyms': validate_timeout_or_none, - 'journal': validate_boolean_or_string, - 'localthresholdms': validate_positive_float_or_zero, - 'maxidletimems': validate_timeout_or_none, - 'maxpoolsize': validate_positive_integer_or_none, - 'maxstalenessseconds': validate_max_staleness, - 'readconcernlevel': validate_string_or_none, - 'readpreference': validate_read_preference_mode, - 'readpreferencetags': validate_read_preference_tags, - 'replicaset': validate_string_or_none, - 'retryreads': validate_boolean_or_string, - 'retrywrites': validate_boolean_or_string, - 'serverselectiontimeoutms': validate_timeout_or_zero, - 'sockettimeoutms': validate_timeout_or_none, - 'ssl_keyfile': validate_readable, - 'tls': validate_boolean_or_string, - 'tlsallowinvalidcertificates': validate_allow_invalid_certs, - 'ssl_cert_reqs': validate_cert_reqs, - 'tlsallowinvalidhostnames': lambda *x: not validate_boolean_or_string(*x), - 'ssl_match_hostname': validate_boolean_or_string, - 'tlscafile': validate_readable, - 'tlscertificatekeyfile': validate_readable, - 'tlscertificatekeyfilepassword': validate_string_or_none, - 'tlsinsecure': validate_boolean_or_string, - 'w': validate_non_negative_int_or_basestring, - 'wtimeoutms': validate_non_negative_integer, - 'zlibcompressionlevel': validate_zlib_compression_level, +URI_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { + "appname": validate_appname_or_none, + "authmechanism": validate_auth_mechanism, + "authmechanismproperties": validate_auth_mechanism_properties, + "authsource": validate_string, + "compressors": validate_compressors, + "connecttimeoutms": validate_timeout_or_none_or_zero, + "directconnection": validate_boolean_or_string, + "heartbeatfrequencyms": validate_timeout_or_none, + "journal": validate_boolean_or_string, + "localthresholdms": validate_positive_float_or_zero, + "maxidletimems": validate_timeout_or_none, + "maxconnecting": validate_positive_integer, + "maxpoolsize": validate_non_negative_integer_or_none, + "maxstalenessseconds": validate_max_staleness, + "readconcernlevel": validate_string_or_none, + "readpreference": validate_read_preference_mode, + "readpreferencetags": validate_read_preference_tags, + "replicaset": validate_string_or_none, + "retryreads": validate_boolean_or_string, + "retrywrites": validate_boolean_or_string, + "loadbalanced": validate_boolean_or_string, + "serverselectiontimeoutms": validate_timeout_or_zero, + "sockettimeoutms": validate_timeout_or_none_or_zero, + "tls": validate_boolean_or_string, + "tlsallowinvalidcertificates": validate_boolean_or_string, + "tlsallowinvalidhostnames": validate_boolean_or_string, + "tlscafile": validate_readable, + "tlscertificatekeyfile": validate_readable, + "tlscertificatekeyfilepassword": validate_string_or_none, + "tlsdisableocspendpointcheck": validate_boolean_or_string, + "tlsinsecure": validate_boolean_or_string, + "w": validate_non_negative_int_or_basestring, + "wtimeoutms": validate_non_negative_integer, + "zlibcompressionlevel": validate_zlib_compression_level, + "srvservicename": validate_string, + "srvmaxhosts": validate_non_negative_integer, + "timeoutms": validate_timeoutms, + "servermonitoringmode": validate_server_monitoring_mode, } # Dictionary 
where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. -NONSPEC_OPTIONS_VALIDATOR_MAP = { - 'connect': validate_boolean_or_string, - 'driver': validate_driver_or_none, - 'fsync': validate_boolean_or_string, - 'minpoolsize': validate_non_negative_integer, - 'socketkeepalive': validate_boolean_or_string, - 'tlscrlfile': validate_readable, - 'tz_aware': validate_boolean_or_string, - 'unicode_decode_error_handler': validate_unicode_decode_error_handler, - 'uuidrepresentation': validate_uuid_representation, - 'waitqueuemultiple': validate_non_negative_integer_or_none, - 'waitqueuetimeoutms': validate_timeout_or_none, +NONSPEC_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { + "connect": validate_boolean_or_string, + "driver": validate_driver_or_none, + "server_api": validate_server_api_or_none, + "fsync": validate_boolean_or_string, + "minpoolsize": validate_non_negative_integer, + "tlscrlfile": validate_readable, + "tz_aware": validate_boolean_or_string, + "unicode_decode_error_handler": validate_unicode_decode_error_handler, + "uuidrepresentation": validate_uuid_representation, + "waitqueuemultiple": validate_non_negative_integer_or_none, + "waitqueuetimeoutms": validate_timeout_or_none, + "datetime_conversion": validate_datetime_conversion, } # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. -KW_VALIDATORS = { - 'document_class': validate_document_class, - 'type_registry': validate_type_registry, - 'read_preference': validate_read_preference, - 'event_listeners': _validate_event_listeners, - 'tzinfo': validate_tzinfo, - 'username': validate_string_or_none, - 'password': validate_string_or_none, - 'server_selector': validate_is_callable_or_none, - 'auto_encryption_opts': validate_auto_encryption_opts_or_none, +KW_VALIDATORS: dict[str, Callable[[Any, Any], Any]] = { + "document_class": validate_document_class, + "type_registry": validate_type_registry, + "read_preference": validate_read_preference, + "event_listeners": _validate_event_listeners, + "tzinfo": validate_tzinfo, + "username": validate_string_or_none, + "password": validate_string_or_none, + "server_selector": validate_is_callable_or_none, + "auto_encryption_opts": validate_auto_encryption_opts_or_none, + "authoidcallowedhosts": validate_list, } # Dictionary where keys are any URI option name, and values are the # internally-used names of that URI option. Options with only one name # variant need not be included here. Options whose public and internal # names are the same need not be included here. -INTERNAL_URI_OPTION_NAME_MAP = { - 'j': 'journal', - 'wtimeout': 'wtimeoutms', - 'tls': 'ssl', - 'tlsallowinvalidcertificates': 'ssl_cert_reqs', - 'tlsallowinvalidhostnames': 'ssl_match_hostname', - 'tlscrlfile': 'ssl_crlfile', - 'tlscafile': 'ssl_ca_certs', - 'tlscertificatekeyfile': 'ssl_certfile', - 'tlscertificatekeyfilepassword': 'ssl_pem_passphrase', +INTERNAL_URI_OPTION_NAME_MAP: dict[str, str] = { + "ssl": "tls", } # Map from deprecated URI option names to a tuple indicating the method of # their deprecation and any additional information that may be needed to # construct the warning message. -URI_OPTIONS_DEPRECATION_MAP = { +URI_OPTIONS_DEPRECATION_MAP: dict[str, tuple[str, str]] = { # format: : (, ), # Supported values: # - 'renamed': should be the new option name. 
Note that case is # preserved for renamed options as they are part of user warnings. # - 'removed': may suggest the rationale for deprecating the # option and/or recommend remedial action. - 'j': ('renamed', 'journal'), - 'wtimeout': ('renamed', 'wTimeoutMS'), - 'ssl_cert_reqs': ('renamed', 'tlsAllowInvalidCertificates'), - 'ssl_match_hostname': ('renamed', 'tlsAllowInvalidHostnames'), - 'ssl_crlfile': ('renamed', 'tlsCRLFile'), - 'ssl_ca_certs': ('renamed', 'tlsCAFile'), - 'ssl_pem_passphrase': ('renamed', 'tlsCertificateKeyFilePassword'), - 'waitqueuemultiple': ('removed', ( - 'Instead of using waitQueueMultiple to bound queuing, limit the size ' - 'of the thread pool in your application server')) + # For example: + # 'wtimeout': ('renamed', 'wTimeoutMS'), } # Augment the option validator map with pymongo-specific option information. URI_OPTIONS_VALIDATOR_MAP.update(NONSPEC_OPTIONS_VALIDATOR_MAP) -for optname, aliases in iteritems(URI_OPTIONS_ALIAS_MAP): +for optname, aliases in URI_OPTIONS_ALIAS_MAP.items(): for alias in aliases: if alias not in URI_OPTIONS_VALIDATOR_MAP: - URI_OPTIONS_VALIDATOR_MAP[alias] = ( - URI_OPTIONS_VALIDATOR_MAP[optname]) + URI_OPTIONS_VALIDATOR_MAP[alias] = URI_OPTIONS_VALIDATOR_MAP[optname] # Map containing all URI option and keyword argument validators. -VALIDATORS = URI_OPTIONS_VALIDATOR_MAP.copy() +VALIDATORS: dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() VALIDATORS.update(KW_VALIDATORS) # List of timeout-related options. -TIMEOUT_OPTIONS = [ - 'connecttimeoutms', - 'heartbeatfrequencyms', - 'maxidletimems', - 'maxstalenessseconds', - 'serverselectiontimeoutms', - 'sockettimeoutms', - 'waitqueuetimeoutms', +TIMEOUT_OPTIONS: list[str] = [ + "connecttimeoutms", + "heartbeatfrequencyms", + "maxidletimems", + "maxstalenessseconds", + "serverselectiontimeoutms", + "sockettimeoutms", + "waitqueuetimeoutms", ] - -_AUTH_OPTIONS = frozenset(['authmechanismproperties']) +_AUTH_OPTIONS = frozenset(["authmechanismproperties"]) -def validate_auth_option(option, value): - """Validate optional authentication parameters. - """ +def validate_auth_option(option: str, value: Any) -> tuple[str, Any]: + """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError('Unknown ' - 'authentication option: %s' % (option,)) - return lower, value + raise ConfigurationError(f"Unknown authentication option: {option}") + return option, value -def validate(option, value): - """Generic validation function. - """ +def validate(option: str, value: Any) -> tuple[str, Any]: + """Generic validation function.""" lower = option.lower() validator = VALIDATORS.get(lower, raise_config_error) value = validator(option, value) - return lower, value + return option, value -def get_validated_options(options, warn=True): +def get_validated_options( + options: Mapping[str, Any], warn: bool = True +) -> MutableMapping[str, Any]: """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. @@ -736,24 +834,33 @@ def get_validated_options(options, warn=True): invalid options will be ignored. Otherwise, invalid options will cause errors. 
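    A rough usage sketch (illustrative; assumes a plain dict of URI
    options)::

        opts = get_validated_options({"maxPoolSize": "10", "bogusOption": 1})
        # "bogusOption" is unknown, so a warning is emitted and the key is
        # dropped; "maxPoolSize" is kept with its value validated to int 10.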
""" + validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() - get_normed_key = lambda x: x - get_setter_key = lambda x: options.cased_key(x) + + def get_normed_key(x: str) -> str: + return x + + def get_setter_key(x: str) -> str: + return options.cased_key(x) # type: ignore[attr-defined] + else: validated_options = {} - get_normed_key = lambda x: x.lower() - get_setter_key = lambda x: x - for opt, value in iteritems(options): + def get_normed_key(x: str) -> str: + return x.lower() + + def get_setter_key(x: str) -> str: + return x + + for opt, value in options.items(): normed_key = get_normed_key(opt) try: - validator = URI_OPTIONS_VALIDATOR_MAP.get( - normed_key, raise_config_error) - value = validator(opt, value) + validator = URI_OPTIONS_VALIDATOR_MAP.get(normed_key, raise_config_error) + value = validator(opt, value) # noqa: PLW2901 except (ValueError, TypeError, ConfigurationError) as exc: if warn: - warnings.warn(str(exc)) + warnings.warn(str(exc), stacklevel=2) else: raise else: @@ -761,57 +868,63 @@ def get_validated_options(options, warn=True): return validated_options +def _esc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any: + return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") + + +def _ecoc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any: + return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") + + # List of write-concern-related options. -WRITE_CONCERN_OPTIONS = frozenset([ - 'w', - 'wtimeout', - 'wtimeoutms', - 'fsync', - 'j', - 'journal' -]) +WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) -class BaseObject(object): +class BaseObject: """A base class that provides attributes and methods common to multiple pymongo classes. SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. """ - def __init__(self, codec_options, read_preference, write_concern, - read_concern): - + def __init__( + self, + codec_options: CodecOptions, + read_preference: _ServerMode, + write_concern: WriteConcern, + read_concern: ReadConcern, + ) -> None: if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,)) + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) self.__read_preference = read_preference if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern") + raise TypeError( + "write_concern must be an instance of pymongo.write_concern.WriteConcern" + ) self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern") + raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern @property - def codec_options(self): + def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. 
""" return self.__codec_options @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """Read only access to the :class:`~pymongo.write_concern.WriteConcern` of this instance. @@ -820,16 +933,15 @@ def write_concern(self): """ return self.__write_concern - def _write_concern_for(self, session): - """Read only access to the write concern of this instance or session. - """ + def _write_concern_for(self, session: Optional[ClientSession]) -> WriteConcern: + """Read only access to the write concern of this instance or session.""" # Override this operation's write concern with the transaction's. if session and session.in_transaction: return DEFAULT_WRITE_CONCERN return self.write_concern @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """Read only access to the read preference of this instance. .. versionchanged:: 3.0 @@ -837,16 +949,15 @@ def read_preference(self): """ return self.__read_preference - def _read_preference_for(self, session): - """Read only access to the read preference of this instance or session. - """ + def _read_preference_for(self, session: Optional[ClientSession]) -> _ServerMode: + """Read only access to the read preference of this instance or session.""" # Override this operation's read preference with the transaction's. if session: return session._txn_read_preference() or self.__read_preference return self.__read_preference @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """Read only access to the :class:`~pymongo.read_concern.ReadConcern` of this instance. @@ -855,66 +966,74 @@ def read_concern(self): return self.__read_concern -class _CaseInsensitiveDictionary(abc.MutableMapping): - def __init__(self, *args, **kwargs): - self.__casedkeys = {} - self.__data = {} +class _CaseInsensitiveDictionary(MutableMapping[str, Any]): + def __init__(self, *args: Any, **kwargs: Any): + self.__casedkeys: dict[str, Any] = {} + self.__data: dict[str, Any] = {} self.update(dict(*args, **kwargs)) - def __contains__(self, key): + def __contains__(self, key: str) -> bool: # type: ignore[override] return key.lower() in self.__data - def __len__(self): + def __len__(self) -> int: return len(self.__data) - def __iter__(self): + def __iter__(self) -> Iterator[str]: return (key for key in self.__casedkeys) - def __repr__(self): + def __repr__(self) -> str: return str({self.__casedkeys[k]: self.__data[k] for k in self}) - def __setitem__(self, key, value): + def __setitem__(self, key: str, value: Any) -> None: lc_key = key.lower() self.__casedkeys[lc_key] = key self.__data[lc_key] = value - def __getitem__(self, key): + def __getitem__(self, key: str) -> Any: return self.__data[key.lower()] - def __delitem__(self, key): + def __delitem__(self, key: str) -> None: lc_key = key.lower() del self.__casedkeys[lc_key] del self.__data[lc_key] - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if not isinstance(other, abc.Mapping): return NotImplemented if len(self) != len(other): return False - for key in other: + for key in other: # noqa: SIM110 if self[key] != other[key]: return False return True - def get(self, key, default=None): + def get(self, key: str, default: Optional[Any] = None) -> Any: return self.__data.get(key.lower(), default) - def pop(self, key, *args, **kwargs): + def pop(self, key: str, *args: Any, **kwargs: Any) -> Any: lc_key = key.lower() self.__casedkeys.pop(lc_key, None) return self.__data.pop(lc_key, *args, **kwargs) - def popitem(self): + def popitem(self) -> tuple[str, Any]: 
lc_key, cased_key = self.__casedkeys.popitem() value = self.__data.pop(lc_key) return cased_key, value - def clear(self): + def clear(self) -> None: self.__casedkeys.clear() self.__data.clear() - def setdefault(self, key, default=None): + @overload + def setdefault(self, key: str, default: None = None) -> Optional[Any]: + ... + + @overload + def setdefault(self, key: str, default: Any) -> Any: + ... + + def setdefault(self, key: str, default: Optional[Any] = None) -> Optional[Any]: lc_key = key.lower() if key in self: return self.__data[lc_key] @@ -923,7 +1042,7 @@ def setdefault(self, key, default=None): self.__data[lc_key] = default return default - def update(self, other): + def update(self, other: Mapping[str, Any]) -> None: # type: ignore[override] if isinstance(other, _CaseInsensitiveDictionary): for key in other: self[other.cased_key(key)] = other[key] @@ -931,5 +1050,5 @@ def update(self, other): for key in other: self[key] = other[key] - def cased_key(self, key): - return self.__casedkeys[key.lower()] \ No newline at end of file + def cased_key(self, key: str) -> Any: + return self.__casedkeys[key.lower()] diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index b6662f22f8..ad54d628bf 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -11,11 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import warnings +from typing import Any, Iterable, Optional, Union try: import snappy + _HAVE_SNAPPY = True except ImportError: # python-snappy isn't available. @@ -23,6 +26,7 @@ try: import zlib + _HAVE_ZLIB = True except ImportError: # Python built without zlib support. @@ -30,21 +34,23 @@ try: from zstandard import ZstdCompressor, ZstdDecompressor + _HAVE_ZSTD = True except ImportError: _HAVE_ZSTD = False +from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS -_SUPPORTED_COMPRESSORS = set(["snappy", "zlib", "zstd"]) -_NO_COMPRESSION = set(['ismaster']) +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) -def validate_compressors(dummy, value): +def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[str]: try: # `value` is string. - compressors = value.split(",") + compressors = value.split(",") # type: ignore[union-attr] except AttributeError: # `value` is an iterable. compressors = list(value) @@ -52,42 +58,49 @@ def validate_compressors(dummy, value): for compressor in compressors[:]: if compressor not in _SUPPORTED_COMPRESSORS: compressors.remove(compressor) - warnings.warn("Unsupported compressor: %s" % (compressor,)) + warnings.warn(f"Unsupported compressor: {compressor}", stacklevel=2) elif compressor == "snappy" and not _HAVE_SNAPPY: compressors.remove(compressor) warnings.warn( "Wire protocol compression with snappy is not available. " - "You must install the python-snappy module for snappy support.") + "You must install the python-snappy module for snappy support.", + stacklevel=2, + ) elif compressor == "zlib" and not _HAVE_ZLIB: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zlib is not available. 
" - "The zlib module is not available.") + "The zlib module is not available.", + stacklevel=2, + ) elif compressor == "zstd" and not _HAVE_ZSTD: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zstandard is not available. " - "You must install the zstandard module for zstandard support.") + "You must install the zstandard module for zstandard support.", + stacklevel=2, + ) return compressors -def validate_zlib_compression_level(option, value): +def validate_zlib_compression_level(option: str, value: Any) -> int: try: level = int(value) - except: - raise TypeError("%s must be an integer, not %r." % (option, value)) + except Exception: + raise TypeError(f"{option} must be an integer, not {value!r}.") from None if level < -1 or level > 9: - raise ValueError( - "%s must be between -1 and 9, not %d." % (option, level)) + raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) return level -class CompressionSettings(object): - def __init__(self, compressors, zlib_compression_level): +class CompressionSettings: + def __init__(self, compressors: list[str], zlib_compression_level: int): self.compressors = compressors self.zlib_compression_level = zlib_compression_level - def get_compression_context(self, compressors): + def get_compression_context( + self, compressors: Optional[list[str]] + ) -> Union[SnappyContext, ZlibContext, ZstdContext, None]: if compressors: chosen = compressors[0] if chosen == "snappy": @@ -96,56 +109,44 @@ def get_compression_context(self, compressors): return ZlibContext(self.zlib_compression_level) elif chosen == "zstd": return ZstdContext() + return None + return None -def _zlib_no_compress(data): - """Compress data with zlib level 0.""" - cobj = zlib.compressobj(0) - return b"".join([cobj.compress(data), cobj.flush()]) - - -class SnappyContext(object): +class SnappyContext: compressor_id = 1 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: return snappy.compress(data) -class ZlibContext(object): +class ZlibContext: compressor_id = 2 - def __init__(self, level): - # Jython zlib.compress doesn't support -1 - if level == -1: - self.compress = zlib.compress - # Jython zlib.compress also doesn't support 0 - elif level == 0: - self.compress = _zlib_no_compress - else: - self.compress = lambda data: zlib.compress(data, level) + def __init__(self, level: int): + self.level = level + + def compress(self, data: bytes) -> bytes: + return zlib.compress(data, self.level) -class ZstdContext(object): +class ZstdContext: compressor_id = 3 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? return ZstdCompressor().compress(data) -def decompress(data, compressor_id): +def decompress(data: bytes, compressor_id: int) -> bytes: if compressor_id == SnappyContext.compressor_id: # python-snappy doesn't support the buffer interface. # https://github.com/andrix/python-snappy/issues/65 # This only matters when data is a memoryview since # id(bytes(data)) == id(data) when data is a bytes. - # NOTE: bytes(memoryview) returns the memoryview repr - # in Python 2.7. The right thing to do in 2.7 is call - # memoryview.tobytes(), but we currently only use - # memoryview in Python 3.x. 
return snappy.uncompress(bytes(data)) elif compressor_id == ZlibContext.compressor_id: return zlib.decompress(data) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 5de40518c5..6dfb3ba90b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -13,44 +13,103 @@ # limitations under the License. """Cursor class to iterate over Mongo query results.""" +from __future__ import annotations import copy import warnings - from collections import deque - -from bson import RE_TYPE +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Tuple, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code -from bson.py3compat import (iteritems, - integer_types, - string_type) from bson.son import SON from pymongo import helpers -from pymongo.common import validate_boolean, validate_is_mapping from pymongo.collation import validate_collation_or_none -from pymongo.errors import (ConnectionFailure, - InvalidOperation, - NotMasterError, - OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore, - _Query, - _RawBatchQuery) -from pymongo.monitoring import ConnectionClosedReason - +from pymongo.common import ( + validate_boolean, + validate_is_document_type, + validate_is_mapping, +) +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.pool import Connection + from pymongo.read_preferences import _ServerMode + + +# These errors mean that the server has already killed the cursor so there is +# no need to send killCursors. +_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) _QUERY_OPTIONS = { "tailable_cursor": 2, - "slave_okay": 4, + "secondary_okay": 4, "oplog_replay": 8, "no_timeout": 16, "await_data": 32, "exhaust": 64, - "partial": 128} + "partial": 128, +} -class CursorType(object): +class CursorType: NON_TAILABLE = 0 """The standard cursor type.""" @@ -79,55 +138,78 @@ class CursorType(object): """ -# This has to be an old style class due to -# http://bugs.jython.org/issue1057 -class _SocketManager: - """Used with exhaust cursors to ensure the socket is returned. 
- """ - def __init__(self, sock, pool): - self.sock = sock - self.pool = pool - self.__closed = False +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" - def __del__(self): - self.close() + def __init__(self, conn: Connection, more_to_come: bool): + self.conn: Optional[Connection] = conn + self.more_to_come = more_to_come + self.lock = _create_lock() - def close(self): - """Return this instance's socket to the connection pool. - """ - if not self.__closed: - self.__closed = True - self.pool.return_socket(self.sock) - self.sock, self.pool = None, None + def update_exhaust(self, more_to_come: bool) -> None: + self.more_to_come = more_to_come + def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + self.conn.unpin() + self.conn = None + + +_Sort = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_Hint = Union[str, _Sort] + + +class Cursor(Generic[_DocumentType]): + """A cursor / iterator over Mongo query results.""" -class Cursor(object): - """A cursor / iterator over Mongo query results. - """ _query_class = _Query _getmore_class = _GetMore - def __init__(self, collection, filter=None, projection=None, skip=0, - limit=0, no_cursor_timeout=False, - cursor_type=CursorType.NON_TAILABLE, - sort=None, allow_partial_results=False, oplog_replay=False, - modifiers=None, batch_size=0, manipulate=True, - collation=None, hint=None, max_scan=None, max_time_ms=None, - max=None, min=None, return_key=False, show_record_id=False, - snapshot=False, comment=None, session=None): + def __init__( + self, + collection: Collection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[ClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None, + ) -> None: """Create a new cursor. Should not be called directly by application developers - see :meth:`~pymongo.collection.Collection.find` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. 
- self.__id = None + self.__collection: Collection[_DocumentType] = collection + self.__id: Any = None self.__exhaust = False - self.__exhaust_mgr = None + self.__sock_mgr: Any = None self.__killed = False + self.__session: Optional[ClientSession] if session: self.__session = session @@ -136,62 +218,74 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__session = None self.__explicit_session = False - spec = filter - if spec is None: - spec = {} - + spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) if not isinstance(skip, int): raise TypeError("skip must be an instance of int") if not isinstance(limit, int): raise TypeError("limit must be an instance of int") validate_boolean("no_cursor_timeout", no_cursor_timeout) - if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE, - CursorType.TAILABLE_AWAIT, CursorType.EXHAUST): + if no_cursor_timeout and not self.__explicit_session: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): raise ValueError("not a valid value for cursor_type") validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) - if modifiers is not None: - warnings.warn("the 'modifiers' parameter is deprecated", - DeprecationWarning, stacklevel=2) - validate_is_mapping("modifiers", modifiers) - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. 
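        # (None is meaningful here: allowDiskUse is then omitted from the
        # find command entirely, leaving the server's default behavior in
        # effect rather than sending an explicit False.)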
+ if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) if projection is not None: - if not projection: - projection = {"_id": 1} projection = helpers._fields_list_to_dict(projection, "projection") - self.__collection = collection + if let is not None: + validate_is_document_type("let", let) + + self.__let = let self.__spec = spec + self.__has_filter = filter is not None self.__projection = projection self.__skip = skip self.__limit = limit self.__batch_size = batch_size - self.__modifiers = modifiers and modifiers.copy() or {} self.__ordering = sort and helpers._index_document(sort) or None self.__max_scan = max_scan self.__explain = False self.__comment = comment self.__max_time_ms = max_time_ms - self.__max_await_time_ms = None - self.__max = max - self.__min = min - self.__manipulate = manipulate + self.__max_await_time_ms: Optional[int] = None + self.__max: Optional[Union[SON[Any, Any], _Sort]] = max + self.__min: Optional[Union[SON[Any, Any], _Sort]] = min self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id + self.__allow_disk_use = allow_disk_use self.__snapshot = snapshot + self.__hint: Union[str, SON[str, Any], None] self.__set_hint(hint) # Exhaust cursor support if cursor_type == CursorType.EXHAUST: if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are not supported by mongos") if limit: raise InvalidOperation("Can't use limit and exhaust together.") self.__exhaust = True @@ -204,13 +298,13 @@ def __init__(self, collection, filter=None, projection=None, skip=0, # it anytime we change __limit. self.__empty = False - self.__data = deque() - self.__address = None + self.__data: deque = deque() + self.__address: Optional[_Address] = None self.__retrieved = 0 self.__codec_options = collection.codec_options # Read preference is set when the initial find is sent. - self.__read_preference = None + self.__read_preference: Optional[_ServerMode] = None self.__read_concern = collection.read_concern self.__query_flags = cursor_type @@ -221,23 +315,26 @@ def __init__(self, collection, filter=None, projection=None, skip=0, if oplog_replay: self.__query_flags |= _QUERY_OPTIONS["oplog_replay"] + # The namespace to use for find/getMore commands. + self.__dbname = collection.database.name + self.__collname = collection.name + @property - def collection(self): + def collection(self) -> Collection[_DocumentType]: """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ return self.__collection @property - def retrieved(self): - """The number of documents retrieved so far. - """ + def retrieved(self) -> int: + """The number of documents retrieved so far.""" return self.__retrieved - def __del__(self): + def __del__(self) -> None: self.__die() - def rewind(self): + def rewind(self) -> Cursor[_DocumentType]: """Rewind this cursor to its unevaluated state. Reset this cursor if it has been partially or completely evaluated. @@ -246,6 +343,7 @@ def rewind(self): be sent to the server, even if the resultant data has already been retrieved by this cursor. """ + self.close() self.__data = deque() self.__id = None self.__address = None @@ -254,7 +352,7 @@ def rewind(self): return self - def clone(self): + def clone(self) -> Cursor[_DocumentType]: """Get a clone of this cursor. 
Returns a new Cursor instance with options matching those that have @@ -264,7 +362,7 @@ def clone(self): """ return self._clone(True) - def _clone(self, deepcopy=True, base=None): + def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: """Internal clone helper.""" if not base: if self.__explicit_session: @@ -272,26 +370,47 @@ def _clone(self, deepcopy=True, base=None): else: base = self._clone_base(None) - values_to_clone = ("spec", "projection", "skip", "limit", - "max_time_ms", "max_await_time_ms", "comment", - "max", "min", "ordering", "explain", "hint", - "batch_size", "max_scan", "manipulate", - "query_flags", "modifiers", "collation") - data = dict((k, v) for k, v in iteritems(self.__dict__) - if k.startswith('_Cursor__') and k[9:] in values_to_clone) + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + ) + data = { + k: v + for k, v in self.__dict__.items() + if k.startswith("_Cursor__") and k[9:] in values_to_clone + } if deepcopy: data = self._deepcopy(data) base.__dict__.update(data) return base - def _clone_base(self, session): - """Creates an empty Cursor object for information to be copied into. - """ + def _clone_base(self, session: Optional[ClientSession]) -> Cursor: + """Creates an empty Cursor object for information to be copied into.""" return self.__class__(self.__collection, session=session) - def __die(self, synchronous=False): - """Closes this cursor. - """ + def __die(self, synchronous: bool = False) -> None: + """Closes this cursor.""" try: already_killed = self.__killed except AttributeError: @@ -300,43 +419,40 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: - if self.__exhaust and self.__exhaust_mgr: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - self.__exhaust_mgr.sock.close_socket( - ConnectionClosedReason.ERROR) - else: - address = _CursorAddress( - self.__address, self.__collection.full_name) - if synchronous: - self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session) - else: - # The cursor will be closed later in a different session. - self.__collection.database.client._close_cursor( - self.__id, address) - if self.__exhaust and self.__exhaust_mgr: - self.__exhaust_mgr.close() - if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) + cursor_id = self.__id + assert self.__address is not None + address = _CursorAddress(self.__address, f"{self.__dbname}.{self.__collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + self.__collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self.__sock_mgr, + self.__session, + self.__explicit_session, + ) + if not self.__explicit_session: self.__session = None + self.__sock_mgr = None - def close(self): - """Explicitly close / kill this cursor. - """ + def close(self) -> None: + """Explicitly close / kill this cursor.""" self.__die(True) - def __query_spec(self): - """Get the spec to use for a query. 
- """ - operators = self.__modifiers.copy() + def __query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} if self.__ordering: operators["$orderby"] = self.__ordering if self.__explain: operators["$explain"] = True if self.__hint: operators["$hint"] = self.__hint + if self.__let: + operators["let"] = self.__let if self.__comment: operators["$comment"] = self.__comment if self.__max_scan: @@ -347,19 +463,19 @@ def __query_spec(self): operators["$max"] = self.__max if self.__min: operators["$min"] = self.__min - if self.__return_key: + if self.__return_key is not None: operators["$returnKey"] = self.__return_key - if self.__show_record_id: + if self.__show_record_id is not None: # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. operators["$showDiskLoc"] = self.__show_record_id - if self.__snapshot: + if self.__snapshot is not None: operators["$snapshot"] = self.__snapshot if operators: # Make a shallow copy so we can cleanly rewind or clone. - spec = self.__spec.copy() + spec = copy.copy(self.__spec) - # White-listed commands must be wrapped in $query. + # Allow-listed commands must be wrapped in $query. if "$query" not in spec: # $query has to come first spec = SON([("$query", spec)]) @@ -376,20 +492,19 @@ def __query_spec(self): # that breaks commands like count and find_and_modify. # Checking spec.keys()[0] covers the case that the spec # was passed as an instance of SON or OrderedDict. - elif ("query" in self.__spec and - (len(self.__spec) == 1 or - next(iter(self.__spec)) == "query")): + elif "query" in self.__spec and ( + len(self.__spec) == 1 or next(iter(self.__spec)) == "query" + ): return SON({"$query": self.__spec}) return self.__spec - def __check_okay_to_chain(self): - """Check if it is okay to chain more options onto this cursor. - """ + def __check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" if self.__retrieved or self.__id is not None: raise InvalidOperation("cannot set options after executing query") - def add_option(self, mask): + def add_option(self, mask: int) -> Cursor[_DocumentType]: """Set arbitrary query flags using a bitmask. To set the tailable flag: @@ -403,14 +518,13 @@ def add_option(self, mask): if self.__limit: raise InvalidOperation("Can't use limit and exhaust together.") if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are not supported by mongos") self.__exhaust = True self.__query_flags |= mask return self - def remove_option(self, mask): + def remove_option(self, mask: int) -> Cursor[_DocumentType]: """Unset arbitrary query flags using a bitmask. To unset the tailable flag: @@ -426,7 +540,29 @@ def remove_option(self, mask): self.__query_flags &= ~mask return self - def limit(self, limit): + def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :Parameters: + - `allow_disk_use`: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. 
versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError("allow_disk_use must be a bool") + self.__check_okay_to_chain() + + self.__allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> Cursor[_DocumentType]: """Limits the number of results to be returned by this cursor. Raises :exc:`TypeError` if `limit` is not an integer. Raises @@ -437,9 +573,9 @@ def limit(self, limit): :Parameters: - `limit`: the number of results to return - .. mongodoc:: limit + .. seealso:: The MongoDB documentation on `limit `_. """ - if not isinstance(limit, integer_types): + if not isinstance(limit, int): raise TypeError("limit must be an integer") if self.__exhaust: raise InvalidOperation("Can't use limit and exhaust together.") @@ -449,7 +585,7 @@ def limit(self, limit): self.__limit = limit return self - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. @@ -468,7 +604,7 @@ def batch_size(self, batch_size): :Parameters: - `batch_size`: The size of each batch of results requested. """ - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") @@ -477,7 +613,7 @@ def batch_size(self, batch_size): self.__batch_size = batch_size return self - def skip(self, skip): + def skip(self, skip: int) -> Cursor[_DocumentType]: """Skips the first `skip` results of this cursor. Raises :exc:`TypeError` if `skip` is not an integer. Raises @@ -489,7 +625,7 @@ def skip(self, skip): :Parameters: - `skip`: the number of results to skip """ - if not isinstance(skip, integer_types): + if not isinstance(skip, int): raise TypeError("skip must be an integer") if skip < 0: raise ValueError("skip must be >= 0") @@ -498,7 +634,7 @@ def skip(self, skip): self.__skip = skip return self - def max_time_ms(self, max_time_ms): + def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: """Specifies a time limit for a query operation. If the specified time is exceeded, the operation will be aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` @@ -511,15 +647,14 @@ def max_time_ms(self, max_time_ms): :Parameters: - `max_time_ms`: the time limit after which the operation is aborted """ - if (not isinstance(max_time_ms, integer_types) - and max_time_ms is not None): + if not isinstance(max_time_ms, int) and max_time_ms is not None: raise TypeError("max_time_ms must be an integer or None") self.__check_okay_to_chain() self.__max_time_ms = max_time_ms return self - def max_await_time_ms(self, max_await_time_ms): + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: """Specifies a time limit for a getMore operation on a :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. @@ -536,8 +671,7 @@ def max_await_time_ms(self, max_await_time_ms): .. 
versionadded:: 3.2 """ - if (not isinstance(max_await_time_ms, integer_types) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -547,9 +681,29 @@ def max_await_time_ms(self, max_await_time_ms): return self - def __getitem__(self, index): + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> Cursor[_DocumentType]: + ... + + def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_DocumentType]]: """Get a single document or a slice of documents from this cursor. + .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! + for idx in range(10): + print(cursor[idx]) + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. @@ -585,15 +739,15 @@ def __getitem__(self, index): skip = 0 if index.start is not None: if index.start < 0: - raise IndexError("Cursor instances do not support " - "negative indices") + raise IndexError("Cursor instances do not support negative indices") skip = index.start if index.stop is not None: limit = index.stop - skip if limit < 0: - raise IndexError("stop index must be greater than start " - "index for slice %r" % index) + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) if limit == 0: self.__empty = True else: @@ -603,10 +757,9 @@ def __getitem__(self, index): self.__limit = limit return self - if isinstance(index, integer_types): + if isinstance(index, int): if index < 0: - raise IndexError("Cursor instances do not support negative " - "indices") + raise IndexError("Cursor instances do not support negative indices") clone = self.clone() clone.skip(index + self.__skip) clone.limit(-1) # use a hard limit @@ -614,10 +767,9 @@ def __getitem__(self, index): for doc in clone: return doc raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " - "instances" % index) + raise TypeError("index %r cannot be applied to Cursor instances" % index) - def max_scan(self, max_scan): + def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: """**DEPRECATED** - Limit the number of documents to scan when performing the query. @@ -637,7 +789,7 @@ def max_scan(self, max_scan): self.__max_scan = max_scan return self - def max(self, spec): + def max(self, spec: _Sort) -> Cursor[_DocumentType]: """Adds ``max`` operator that specifies upper bound for specific index. When using ``max``, :meth:`~hint` should also be configured to ensure @@ -660,7 +812,7 @@ def max(self, spec): self.__max = SON(spec) return self - def min(self, spec): + def min(self, spec: _Sort) -> Cursor[_DocumentType]: """Adds ``min`` operator that specifies lower bound for specific index. 
When using ``min``, :meth:`~hint` should also be configured to ensure @@ -683,24 +835,26 @@ def min(self, spec): self.__min = SON(spec) return self - def sort(self, key_or_list, direction=None): + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> Cursor[_DocumentType]: """Sorts this cursor's results. Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) - To sort by multiple fields, pass a list of (key, direction) pairs:: + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: for doc in collection.find().sort([ - ('field1', pymongo.ASCENDING), + 'field1', ('field2', pymongo.DESCENDING)]): print(doc) - Beginning with MongoDB version 2.6, text search results can be - sorted by relevance:: + Text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, @@ -712,6 +866,9 @@ def sort(self, key_or_list, direction=None): for doc in cursor: print(doc) + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. @@ -727,71 +884,12 @@ def sort(self, key_or_list, direction=None): self.__ordering = helpers._index_document(keys) return self - def count(self, with_limit_and_skip=False): - """**DEPRECATED** - Get the size of the results set for this query. - - The :meth:`count` method is deprecated and **not** supported in a - transaction. Please use - :meth:`~pymongo.collection.Collection.count_documents` instead. - - Returns the number of documents in the results set for this query. Does - not take :meth:`limit` and :meth:`skip` into account by default - set - `with_limit_and_skip` to ``True`` if that is the desired behavior. - Raises :class:`~pymongo.errors.OperationFailure` on a database error. - - When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` - applied to the query. In the following example the hint is passed to - the count command: - - collection.find({'field': 'value'}).hint('field_1').count() - - The :meth:`count` method obeys the - :attr:`~pymongo.collection.Collection.read_preference` of the - :class:`~pymongo.collection.Collection` instance on which - :meth:`~pymongo.collection.Collection.find` was called. - - :Parameters: - - `with_limit_and_skip` (optional): take any :meth:`limit` or - :meth:`skip` that has been applied to this cursor into account when - getting the count - - .. note:: The `with_limit_and_skip` parameter requires server - version **>= 1.1.4-** - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 2.8 - The :meth:`~count` method now supports :meth:`~hint`. - """ - warnings.warn("count is deprecated. 
Use Collection.count_documents " - "instead.", DeprecationWarning, stacklevel=2) - validate_boolean("with_limit_and_skip", with_limit_and_skip) - cmd = SON([("count", self.__collection.name), - ("query", self.__spec)]) - if self.__max_time_ms is not None: - cmd["maxTimeMS"] = self.__max_time_ms - if self.__comment: - cmd["comment"] = self.__comment - - if self.__hint is not None: - cmd["hint"] = self.__hint - - if with_limit_and_skip: - if self.__limit: - cmd["limit"] = self.__limit - if self.__skip: - cmd["skip"] = self.__skip - - return self.__collection._count( - cmd, self.__collation, session=self.__session) - - def distinct(self, key): + def distinct(self, key: str) -> list: """Get a list of distinct values for `key` among all documents in the result set of this query. Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in python 3). + :class:`str`. The :meth:`distinct` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the @@ -803,30 +901,29 @@ def distinct(self, key): .. seealso:: :meth:`pymongo.collection.Collection.distinct` """ - options = {} + options: dict[str, Any] = {} if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: - options['maxTimeMS'] = self.__max_time_ms + options["maxTimeMS"] = self.__max_time_ms if self.__comment: - options['comment'] = self.__comment + options["comment"] = self.__comment if self.__collation is not None: - options['collation'] = self.__collation + options["collation"] = self.__collation - return self.__collection.distinct( - key, session=self.__session, **options) + return self.__collection.distinct(key, session=self.__session, **options) - def explain(self): + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. - .. note:: Starting with MongoDB 3.2 :meth:`explain` uses - the default verbosity mode of the `explain command - `_, + .. note:: This method uses the default verbosity mode of the + `explain command + `_, ``allPlansExecution``. To use a different verbosity use :meth:`~pymongo.database.Database.command` to run the explain command directly. - .. mongodoc:: explain + .. seealso:: The MongoDB documentation on `explain `_. """ c = self.clone() c.__explain = True @@ -836,17 +933,17 @@ def explain(self): c.__limit = -abs(c.__limit) return next(c) - def __set_hint(self, index): + def __set_hint(self, index: Optional[_Hint]) -> None: if index is None: self.__hint = None return - if isinstance(index, string_type): + if isinstance(index, str): self.__hint = index else: self.__hint = helpers._index_document(index) - def hint(self, index): + def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: """Adds a 'hint', telling Mongo the proper index to use for the query. Judicious use of hints can greatly improve query @@ -866,18 +963,15 @@ def hint(self, index): :Parameters: - `index`: index to hint on (as an index specifier) - - .. versionchanged:: 2.8 - The :meth:`~hint` method accepts the name of the index. """ self.__check_okay_to_chain() self.__set_hint(index) return self - def comment(self, comment): + def comment(self, comment: Any) -> Cursor[_DocumentType]: """Adds a 'comment' to the cursor. 
- http://docs.mongodb.org/manual/reference/operator/comment/ + http://mongodb.com/docs/manual/reference/operator/comment/ :Parameters: - `comment`: A string to attach to the query to help interpret and @@ -889,38 +983,52 @@ def comment(self, comment): self.__comment = comment return self - def where(self, code): - """Adds a $where clause to this query. + def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: - The `code` argument must be an instance of :class:`basestring` - (:class:`str` in python 3) or :class:`~bson.code.Code` - containing a JavaScript expression. This expression will be - evaluated for each document scanned. Only those documents - for which the expression evaluates to *true* will be returned - as results. The keyword *this* refers to the object currently - being scanned. + # Find all documents where field "a" is less than "b" plus "c". + for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidOperation` if this + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + :Parameters: - `code`: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ """ self.__check_okay_to_chain() if not isinstance(code, Code): code = Code(code) - self.__spec["$where"] = code + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self.__has_filter: + spec = dict(self.__spec) + else: + spec = cast(dict, self.__spec) + spec["$where"] = code + self.__spec = spec return self - def collation(self, collation): + def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: """Adds a :class:`~pymongo.collation.Collation` to this query. - This option is only supported on MongoDB 3.4 and above. - Raises :exc:`TypeError` if `collation` is not an instance of :class:`~pymongo.collation.Collation` or a ``dict``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has @@ -934,7 +1042,7 @@ def collation(self, collation): self.__collation = validate_collation_or_none(collation) return self - def __send_message(self, operation): + def __send_message(self, operation: Union[_Query, _GetMore]) -> None: """Send a query or getmore operation and handles the response. If operation is ``None`` this is an exhaust cursor, which reads @@ -946,64 +1054,57 @@ def __send_message(self, operation): client = self.__collection.database.client # OP_MSG is required to support exhaust cursors with encryption. 
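        # (Likely rationale: automatic encryption must intercept and rewrite
        # each command and its reply, which cannot be done against the
        # streamed moreToCome replies of an exhaust cursor, so the
        # combination is rejected up front.)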
if client._encrypter and self.__exhaust: - raise InvalidOperation( - "exhaust cursors do not support auto encryption") + raise InvalidOperation("exhaust cursors do not support auto encryption") try: - response = client._run_operation_with_response( - operation, self._unpack_response, exhaust=self.__exhaust, - address=self.__address) - except OperationFailure: - self.__killed = True - - # Make sure exhaust socket is returned immediately, if necessary. - self.__die() - + response = client._run_operation( + operation, self._unpack_response, address=self.__address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: + # Don't send killCursors because the cursor is already closed. + self.__killed = True + if exc.timeout: + self.__die(False) + else: + self.close() # If this is a tailable cursor the error is likely # due to capped collection roll over. Setting # self.__killed to True ensures Cursor.alive will be # False. No need to re-raise. - if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]: + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): return raise - except NotMasterError: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - self.__killed = True - - # Make sure exhaust socket is returned immediately, if necessary. - self.__die() - - raise except ConnectionFailure: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. self.__killed = True - self.__die() + self.close() raise except Exception: - # Close the cursor - self.__die() + self.close() raise self.__address = response.address - if self.__exhaust and not self.__exhaust_mgr: - # 'response' is an ExhaustResponse. - self.__exhaust_mgr = _SocketManager(response.socket_info, - response.pool) + if isinstance(response, PinnedResponse): + if not self.__sock_mgr: + self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) cmd_name = operation.name docs = response.docs if response.from_command: if cmd_name != "explain": - cursor = docs[0]['cursor'] - self.__id = cursor['id'] - if cmd_name == 'find': - documents = cursor['firstBatch'] + cursor = docs[0]["cursor"] + self.__id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self.__dbname, self.__collname = ns.split(".", 1) else: - documents = cursor['nextBatch'] + documents = cursor["nextBatch"] self.__data = deque(documents) self.__retrieved += len(documents) else: @@ -1011,32 +1112,36 @@ def __send_message(self, operation): self.__data = deque(docs) self.__retrieved += len(docs) else: + assert isinstance(response.data, _OpReply) self.__id = response.data.cursor_id self.__data = deque(docs) self.__retrieved += response.data.number_returned if self.__id == 0: - self.__killed = True # Don't wait for garbage collection to call __del__, return the # socket and the session to the pool now. 
- self.__die() + self.close() if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.__die() - - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) - - def _read_preference(self): + self.close() + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _read_preference(self) -> _ServerMode: if self.__read_preference is None: # Save the read preference for getMore commands. - self.__read_preference = self.__collection._read_preference_for( - self.session) + self.__read_preference = self.__collection._read_preference_for(self.session) return self.__read_preference - def _refresh(self): + def _refresh(self) -> int: """Refreshes the cursor with more data from Mongo. Returns the length of self.__data after refresh. Will exit early if @@ -1051,25 +1156,28 @@ def _refresh(self): if self.__id is None: # Query if (self.__min or self.__max) and not self.__hint: - warnings.warn("using a min/max query operator without " - "specifying a Cursor.hint is deprecated. A " - "hint will be required when using min/max in " - "PyMongo 4.0", - DeprecationWarning, stacklevel=3) - q = self._query_class(self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client) + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self.__query_flags, + self.__collection.database.name, + self.__collection.name, + self.__skip, + self.__query_spec(), + self.__projection, + self.__codec_options, + self._read_preference(), + self.__limit, + self.__batch_size, + self.__read_concern, + self.__collation, + self.__session, + self.__collection.database.client, + self.__allow_disk_use, + self.__exhaust, + ) self.__send_message(q) elif self.__id: # Get More if self.__limit: @@ -1078,28 +1186,31 @@ def _refresh(self): limit = min(limit, self.__batch_size) else: limit = self.__batch_size - # Exhaust cursors don't send getMore messages. - g = self._getmore_class(self.__collection.database.name, - self.__collection.name, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__exhaust_mgr) + g = self._getmore_class( + self.__dbname, + self.__collname, + limit, + self.__id, + self.__codec_options, + self._read_preference(), + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + self.__exhaust, + self.__comment, + ) self.__send_message(g) return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? This is mostly useful with `tailable cursors - `_ + `_ since they will stop iterating even though they *may* return more results in the future. 
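
The ``alive`` property documented above matters mainly for tailable cursors, where ``StopIteration`` does not mean the cursor is dead. A minimal polling sketch, assuming ``test.log`` is a capped collection on a local server::

    import time

    from pymongo import CursorType, MongoClient

    log = MongoClient().test.log  # assumed to be a capped collection

    cursor = log.find(cursor_type=CursorType.TAILABLE_AWAIT)
    while cursor.alive:
        try:
            print(cursor.next())
        except StopIteration:
            # No new documents yet; a tailable cursor may yield more later.
            time.sleep(1)
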
@@ -1117,19 +1228,15 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> Optional[int]: """Returns the id of the cursor - Useful if you need to manage cursor ids and want to handle killing - cursors manually using - :meth:`~pymongo.mongo_client.MongoClient.kill_cursors` - .. versionadded:: 2.2 """ return self.__id @property - def address(self): + def address(self) -> Optional[tuple[str, Any]]: """The (host, port) of the server used, or None. .. versionchanged:: 3.0 @@ -1138,121 +1245,134 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> Cursor[_DocumentType]: return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" if self.__empty: raise StopIteration if len(self.__data) or self._refresh(): - if self.__manipulate: - _db = self.__collection.database - return _db._fix_outgoing(self.__data.popleft(), - self.__collection) - else: - return self.__data.popleft() + return self.__data.popleft() else: raise StopIteration __next__ = next - def __enter__(self): + def __enter__(self) -> Cursor[_DocumentType]: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __copy__(self): + def __copy__(self) -> Cursor[_DocumentType]: """Support function for `copy.copy()`. .. versionadded:: 2.4 """ return self._clone(deepcopy=False) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> Any: """Support function for `copy.deepcopy()`. .. versionadded:: 2.4 """ return self._clone(deepcopy=True) - def _deepcopy(self, x, memo=None): + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: + ... + + @overload + def _deepcopy( + self, x: SupportsItems, memo: Optional[dict[int, Union[list, dict]]] = None + ) -> dict: + ... + + def _deepcopy( + self, x: Union[Iterable, SupportsItems], memo: Optional[dict[int, Union[list, dict]]] = None + ) -> Union[list, dict]: """Deepcopy helper for the data dictionary or list. Regular expressions cannot be deep copied but as they are immutable we don't have to copy them when cloning. 
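
``__enter__``/``__exit__`` and ``__deepcopy__`` above enable two common patterns: deterministic cleanup via ``with``, and cloning an unevaluated query. A short sketch (server and collection name illustrative)::

    import copy

    from pymongo import MongoClient

    coll = MongoClient().test.example

    # close() is guaranteed to run, even if iteration raises.
    with coll.find({"status": "active"}) as cursor:
        for doc in cursor:
            print(doc)

    # deepcopy clones the query spec and options into a fresh cursor.
    base = coll.find({"status": "active"}).sort("ts", -1)
    rerun = copy.deepcopy(base)  # independent cursor, same query
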
""" - if not hasattr(x, 'items'): + y: Union[list, dict] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): y, is_list, iterator = [], True, enumerate(x) else: - y, is_list, iterator = {}, False, iteritems(x) - + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() if memo is None: memo = {} val_id = id(x) if val_id in memo: - return memo.get(val_id) + return memo[val_id] memo[val_id] = y for key, value in iterator: if isinstance(value, (dict, list)) and not isinstance(value, SON): - value = self._deepcopy(value, memo) + value = self._deepcopy(value, memo) # noqa: PLW2901 elif not isinstance(value, RE_TYPE): - value = copy.deepcopy(value, memo) + value = copy.deepcopy(value, memo) # noqa: PLW2901 if is_list: - y.append(value) + y.append(value) # type: ignore[union-attr] else: if not isinstance(key, RE_TYPE): - key = copy.deepcopy(key, memo) + key = copy.deepcopy(key, memo) # noqa: PLW2901 y[key] = value return y -class RawBatchCursor(Cursor): +class RawBatchCursor(Cursor, Generic[_DocumentType]): """A cursor / iterator over raw batches of BSON data from a query result.""" _query_class = _RawBatchQuery _getmore_class = _RawBatchGetMore - def __init__(self, *args, **kwargs): + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - see :meth:`~pymongo.collection.Collection.find_raw_batches` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ - manipulate = kwargs.get('manipulate') - kwargs['manipulate'] = False - super(RawBatchCursor, self).__init__(*args, **kwargs) - - # Throw only after cursor's initialized, to prevent errors in __del__. - if manipulate: - raise InvalidOperation( - "Cannot use RawBatchCursor with manipulate=True") - - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.raw_response(cursor_id) - - def explain(self): + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. - .. mongodoc:: explain + .. seealso:: The MongoDB documentation on `explain `_. """ clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() - def __getitem__(self, index): + def __getitem__(self, index: Any) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/cursor_manager.py b/pymongo/cursor_manager.py deleted file mode 100644 index c05cf301e7..0000000000 --- a/pymongo/cursor_manager.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""DEPRECATED - A manager to handle when cursors are killed after they are -closed. - -New cursor managers should be defined as subclasses of CursorManager and can be -installed on a client by calling -:meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager`. - -.. versionchanged:: 3.3 - Deprecated, for real this time. - -.. versionchanged:: 3.0 - Undeprecated. :meth:`~pymongo.cursor_manager.CursorManager.close` now - requires an `address` argument. The ``BatchCursorManager`` class is removed. -""" - -import warnings -import weakref -from bson.py3compat import integer_types - - -class CursorManager(object): - """DEPRECATED - The cursor manager base class.""" - - def __init__(self, client): - """Instantiate the manager. - - :Parameters: - - `client`: a MongoClient - """ - warnings.warn( - "Cursor managers are deprecated.", - DeprecationWarning, - stacklevel=2) - self.__client = weakref.ref(client) - - def close(self, cursor_id, address): - """Kill a cursor. - - Raises TypeError if cursor_id is not an instance of (int, long). - - :Parameters: - - `cursor_id`: cursor id to close - - `address`: the cursor's server's (host, port) pair - - .. versionchanged:: 3.0 - Now requires an `address` argument. - """ - if not isinstance(cursor_id, integer_types): - raise TypeError("cursor_id must be an integer") - - self.__client().kill_cursors([cursor_id], address) diff --git a/pymongo/daemon.py b/pymongo/daemon.py index f066a02c23..b40384df13 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -18,46 +18,29 @@ client-side field level encryption is enabled. See :ref:`automatic-client-side-encryption` for more info. """ +from __future__ import annotations import os import subprocess import sys -import time +import warnings +from typing import Any, Optional, Sequence # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) -if sys.version_info[0] < 3: - def _popen_wait(popen, timeout): - """Implement wait timeout support for Python 2.""" - from pymongo.monotonic import time as _time - deadline = _time() + timeout - # Initial delay of 1ms - delay = .0005 - while True: - returncode = popen.poll() - if returncode is not None: - return returncode - - remaining = deadline - _time() - if remaining <= 0: - # Just return None instead of raising an error. - return None - delay = min(delay * 2, remaining, .5) - time.sleep(delay) -else: - def _popen_wait(popen, timeout): - """Implement wait timeout support for Python 3.""" - try: - return popen.wait(timeout=timeout) - except subprocess.TimeoutExpired: - # Silence TimeoutExpired errors. - return None +def _popen_wait(popen: subprocess.Popen[Any], timeout: Optional[float]) -> Optional[int]: + """Implement wait timeout support for Python 3.""" + try: + return popen.wait(timeout=timeout) + except subprocess.TimeoutExpired: + # Silence TimeoutExpired errors. + return None -def _silence_resource_warning(popen): +def _silence_resource_warning(popen: Optional[subprocess.Popen[Any]]) -> None: """Silence Popen's ResourceWarning. 
Note this should only be used if the process was created as a daemon. @@ -66,21 +49,34 @@ def _silence_resource_warning(popen): # "ResourceWarning: subprocess XXX is still running". # See https://bugs.python.org/issue38890 and # https://bugs.python.org/issue26741. - popen.returncode = 0 + # popen is None when mongocryptd spawning fails + if popen is not None: + popen.returncode = 0 -if sys.platform == 'win32': +if sys.platform == "win32": # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. - _DETACHED_PROCESS = getattr(subprocess, 'DETACHED_PROCESS', 0x00000008) + _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Windows).""" - with open(os.devnull, 'r+b') as devnull: - popen = subprocess.Popen( - args, - creationflags=_DETACHED_PROCESS, - stdin=devnull, stderr=devnull, stdout=devnull) - _silence_resource_warning(popen) + try: + with open(os.devnull, "r+b") as devnull: + popen = subprocess.Popen( + args, # noqa: S603 + creationflags=_DETACHED_PROCESS, + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) + _silence_resource_warning(popen) + except FileNotFoundError as exc: + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + else: # On Unix we spawn the daemon process with a double Popen. # 1) The first Popen runs this file as a Python script using the current @@ -93,26 +89,35 @@ def _spawn_daemon(args): # to be safe to call from any thread. Using Popen instead of fork also # avoids triggering the application's os.register_at_fork() callbacks when # we spawn the mongocryptd daemon process. - def _spawn(args): + def _spawn(args: Sequence[str]) -> Optional[subprocess.Popen[Any]]: """Spawn the process and silence stdout/stderr.""" - with open(os.devnull, 'r+b') as devnull: - return subprocess.Popen( - args, - close_fds=True, - stdin=devnull, stderr=devnull, stdout=devnull) - - - def _spawn_daemon_double_popen(args): + try: + with open(os.devnull, "r+b") as devnull: + return subprocess.Popen( + args, # noqa: S603 + close_fds=True, + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) + except FileNotFoundError as exc: + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + return None + + def _spawn_daemon_double_popen(args: Sequence[str]) -> None: """Spawn a daemon process using a double subprocess.Popen.""" spawner_args = [sys.executable, _THIS_FILE] spawner_args.extend(args) - temp_proc = subprocess.Popen(spawner_args, close_fds=True) + temp_proc = subprocess.Popen(spawner_args, close_fds=True) # noqa: S603 # Reap the intermediate child process to avoid creating zombie # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, # sys.executable will be an empty string or None". @@ -129,10 +134,9 @@ def _spawn_daemon(args): # until the main application exits. _spawn(args) - - if __name__ == '__main__': + if __name__ == "__main__": # Attempt to start a new session to decouple from the parent. 
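
The ``_popen_wait`` helper defined earlier in this file silences ``TimeoutExpired`` and reports "still running" as ``None``. The same pattern in isolation, assuming a POSIX ``sleep`` binary purely for demonstration::

    import subprocess
    from typing import Optional

    def wait_with_timeout(popen: subprocess.Popen, timeout: float) -> Optional[int]:
        """Return the exit code, or None if the process is still running."""
        try:
            return popen.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            return None

    proc = subprocess.Popen(["sleep", "5"])
    print(wait_with_timeout(proc, 0.5))  # None: timed out, process still alive
    proc.terminate()
    print(wait_with_timeout(proc, 5))    # exit status once the process ends
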
- if hasattr(os, 'setsid'): + if hasattr(os, "setsid"): try: os.setsid() except OSError: diff --git a/pymongo/database.py b/pymongo/database.py index 701e55221e..a52b3a29a5 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,57 +13,79 @@ # limitations under the License. """Database level operations.""" - -import warnings - -from bson.code import Code -from bson.codec_options import DEFAULT_CODEC_OPTIONS +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef -from bson.py3compat import iteritems, string_type, _unicode from bson.son import SON -from pymongo import auth, common +from bson.timestamp import Timestamp +from pymongo import _csot, common from pymongo.aggregation import _DatabaseAggregationCommand from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - InvalidName, - OperationFailure) -from pymongo.message import _first_batch -from pymongo.read_preferences import ReadPreference -from pymongo.son_manipulator import SONManipulator -from pymongo.write_concern import DEFAULT_WRITE_CONCERN - - -_INDEX_REGEX = {"name": {"$regex": r"^(?!.*\$)"}} -_SYSTEM_FILTER = {"filter": {"name": {"$regex": r"^(?!system\.)"}}} - - -def _check_name(name): - """Check if a database name is valid. - """ +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.client_session import ClientSession + from pymongo.mongo_client import MongoClient + from pymongo.pool import Connection + from pymongo.read_concern import ReadConcern + from pymongo.server import Server + from pymongo.write_concern import WriteConcern + + +def _check_name(name: str) -> None: + """Check if a database name is valid.""" if not name: raise InvalidName("database name cannot be the empty string") - for invalid_char in [' ', '.', '$', '/', '\\', '\x00', '"']: + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: if invalid_char in name: - raise InvalidName("database names cannot contain the " - "character %r" % invalid_char) + raise InvalidName("database names cannot contain the character %r" % invalid_char) -class Database(common.BaseObject): - """A Mongo database. - """ +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) - def __init__(self, client, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + +class Database(common.BaseObject, Generic[_DocumentType]): + """A Mongo database.""" + + def __init__( + self, + client: MongoClient[_DocumentType], + name: str, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: """Get a database by client and name. 
Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - database name. + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. :Parameters: - `client`: A :class:`~pymongo.mongo_client.MongoClient` instance. @@ -80,7 +102,14 @@ def __init__(self, client, name, codec_options=None, read_preference=None, :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) client.read_concern is used. - .. mongodoc:: databases + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. .. versionchanged:: 3.2 Added the read_concern option. @@ -97,144 +126,50 @@ def __init__(self, client, name, codec_options=None, read_preference=None, db.__my_collection__ """ - super(Database, self).__init__( + super().__init__( codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, - read_concern or client.read_concern) + read_concern or client.read_concern, + ) - if not isinstance(name, string_type): - raise TypeError("name must be an instance " - "of %s" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") - if name != '$external': + if name != "$external": _check_name(name) - self.__name = _unicode(name) - self.__client = client - - self.__incoming_manipulators = [] - self.__incoming_copying_manipulators = [] - self.__outgoing_manipulators = [] - self.__outgoing_copying_manipulators = [] - - def add_son_manipulator(self, manipulator): - """Add a new son manipulator to this database. - - **DEPRECATED** - `add_son_manipulator` is deprecated. - - .. versionchanged:: 3.0 - Deprecated add_son_manipulator. - """ - warnings.warn("add_son_manipulator is deprecated", - DeprecationWarning, stacklevel=2) - base = SONManipulator() - def method_overwritten(instance, method): - """Test if this method has been overridden.""" - return (getattr( - instance, method).__func__ != getattr(base, method).__func__) - - if manipulator.will_copy(): - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_copying_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_copying_manipulators.insert(0, manipulator) - else: - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_manipulators.insert(0, manipulator) + self.__name = name + self.__client: MongoClient[_DocumentType] = client + self._timeout = client.options.timeout @property - def system_js(self): - """**DEPRECATED**: :class:`SystemJS` helper for this :class:`Database`. - - See the documentation for :class:`SystemJS` for more details. 
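
``_check_name`` above rejects empty names and a small set of forbidden characters, and the check runs client-side before any I/O. A sketch (``connect=False`` avoids needing a running server)::

    from pymongo import MongoClient
    from pymongo.errors import InvalidName

    client = MongoClient(connect=False)

    db = client["my_database"]  # fine
    try:
        client["bad.name"]      # '.' is one of the rejected characters
    except InvalidName as exc:
        print(exc)
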
- """ - return SystemJS(self) - - @property - def client(self): + def client(self) -> MongoClient[_DocumentType]: """The client instance for this :class:`Database`.""" return self.__client @property - def name(self): + def name(self) -> str: """The name of this :class:`Database`.""" return self.__name - @property - def incoming_manipulators(self): - """**DEPRECATED**: All incoming SON manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.incoming_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_manipulators] - - @property - def incoming_copying_manipulators(self): - """**DEPRECATED**: All incoming SON copying manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.incoming_copying_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_copying_manipulators] - - @property - def outgoing_manipulators(self): - """**DEPRECATED**: All outgoing SON manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.outgoing_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_manipulators] - - @property - def outgoing_copying_manipulators(self): - """**DEPRECATED**: All outgoing SON copying manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.outgoing_copying_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_copying_manipulators] - - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Database[_DocumentType]: """Get a clone of this database changing the specified settings. >>> db1.read_preference Primary() - >>> from pymongo import ReadPreference - >>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY) + >>> from pymongo.read_preferences import Secondary + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) >>> db1.read_preference Primary() >>> db2.read_preference - Secondary(tag_sets=None) + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) :Parameters: - `codec_options` (optional): An instance of @@ -256,26 +191,30 @@ def with_options(self, codec_options=None, read_preference=None, .. 
versionadded:: 3.8 """ - return Database(self.client, - self.__name, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) - - def __eq__(self, other): + return Database( + self.client, + self.__name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: if isinstance(other, Database): - return (self.__client == other.client and - self.__name == other.name) + return self.__client == other.client and self.__name == other.name return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "Database(%r, %r)" % (self.__client, self.__name) + def __hash__(self) -> int: + return hash((self.__client, self.__name)) + + def __repr__(self) -> str: + return f"Database({self.__client!r}, {self.__name!r})" - def __getattr__(self, name): + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. @@ -283,13 +222,14 @@ def __getattr__(self, name): :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( - "Database has no attribute %r. To access the %s" - " collection, use database[%r]." % (name, name, name)) + f"Database has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. @@ -299,8 +239,14 @@ def __getitem__(self, name): """ return Collection(self, name) - def get_collection(self, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def get_collection( + self, + name: str, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -339,12 +285,54 @@ def get_collection(self, name, codec_options=None, read_preference=None, used. 
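
Two behaviors from the hunks above are worth seeing side by side: ``with_options`` returns a configured clone, and ``__getattr__`` deliberately refuses names starting with an underscore so attribute typos fail loudly. For example::

    from pymongo import MongoClient
    from pymongo.read_preferences import Secondary

    db = MongoClient(connect=False).test

    events = db.events            # attribute access
    same = db["events"]           # item access, equivalent
    internal = db["_internal"]    # db._internal would raise AttributeError

    # A clone with different settings; db itself is unchanged.
    secondary_db = db.with_options(read_preference=Secondary())
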
""" return Collection( - self, name, False, codec_options, read_preference, - write_concern, read_concern) - - def create_collection(self, name, codec_options=None, - read_preference=None, write_concern=None, - read_concern=None, session=None, **kwargs): + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + @_csot.apply + def create_collection( + self, + name: str, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this database. @@ -353,18 +341,6 @@ def create_collection(self, name, codec_options=None, creation. :class:`~pymongo.errors.CollectionInvalid` will be raised if the collection already exists. - Options should be passed as keyword arguments to this method. Supported - options vary with MongoDB release. Some examples include: - - - "size": desired initial size for the collection (in - bytes). For capped collections this size is the max - size of the collection. - - "capped": if True, this is a capped collection - - "max": maximum number of objects if capped (optional) - - See the MongoDB documentation for a full list of supported options by - server version. - :Parameters: - `name`: the name of the collection to create - `codec_options` (optional): An instance of @@ -386,8 +362,78 @@ def create_collection(self, name, codec_options=None, :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - ``check_exists`` (optional): if True (the default), send a listCollections command to + check if the collection already exists before creation. - `**kwargs` (optional): additional keyword arguments will - be passed as options for the create collection command + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size`` (int): desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. 
+          - ``capped`` (bool): if True, this is a capped collection
+          - ``max`` (int): maximum number of objects if capped (optional)
+          - ``timeseries`` (dict): a document specifying configuration options for
+            timeseries collections
+          - ``expireAfterSeconds`` (int): the number of seconds after which a
+            document in a timeseries collection expires
+          - ``validator`` (dict): a document specifying validation rules or expressions
+            for the collection
+          - ``validationLevel`` (str): how strictly to apply the
+            validation rules to existing documents during an update. The default level
+            is "strict"
+          - ``validationAction`` (str): whether to "error" on invalid documents
+            (the default) or just "warn" about the violations but allow invalid
+            documents to be inserted
+          - ``indexOptionDefaults`` (dict): a document specifying a default configuration
+            for indexes when creating a collection
+          - ``viewOn`` (str): the name of the source collection or view from which
+            to create the view
+          - ``pipeline`` (list): a list of aggregation pipeline stages
+          - ``comment`` (str): a user-provided comment to attach to this command.
+            This option is only supported on MongoDB >= 4.4.
+          - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for
+            Queryable Encryption. For example::
+
+                {
+                  "escCollection": "enxcol_.encryptedCollection.esc",
+                  "ecocCollection": "enxcol_.encryptedCollection.ecoc",
+                  "fields": [
+                      {
+                          "path": "firstName",
+                          "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
+                          "bsonType": "string",
+                          "queries": {"queryType": "equality"}
+                      },
+                      {
+                          "path": "ssn",
+                          "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
+                          "bsonType": "string"
+                      }
+                  ]
+                }
+          - ``clusteredIndex`` (dict): Document that specifies the clustered index
+            configuration. It must have the following form::
+
+                {
+                    // key pattern must be {_id: 1}
+                    key: <key pattern>, // required
+                    unique: <bool>, // required, must be `true`
+                    name: <string>, // optional, otherwise automatically generated
+                    v: <int>, // optional, must be `2` if provided
+                }
+          - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for
+            enabling pre- and post-images.
+
+        .. versionchanged:: 4.2
+           Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters.
+
+        .. versionchanged:: 3.11
+           This method is now supported inside multi-document transactions
+           with MongoDB 4.4+.

         .. versionchanged:: 3.6
            Added ``session`` parameter.
@@ -398,62 +444,47 @@ def create_collection(self, name, codec_options=None,
         .. versionchanged:: 3.0
            Added the codec_options, read_preference, and write_concern
            options.

-        .. versionchanged:: 2.2
-           Removed deprecated argument: options
+        ..
_create collection command: + https://mongodb.com/docs/manual/reference/command/create """ - with self.__client._tmp_session(session) as s: - if name in self.list_collection_names( - filter={"name": name}, session=s): - raise CollectionInvalid("collection %s already exists" % name) - - return Collection(self, name, True, codec_options, - read_preference, write_concern, - read_concern, session=s, **kwargs) - - def _apply_incoming_manipulators(self, son, collection): - """Apply incoming manipulators to `son`.""" - for manipulator in self.__incoming_manipulators: - son = manipulator.transform_incoming(son, collection) - return son + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) + if encrypted_fields: + common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields - def _apply_incoming_copying_manipulators(self, son, collection): - """Apply incoming copying manipulators to `son`.""" - for manipulator in self.__incoming_copying_manipulators: - son = manipulator.transform_incoming(son, collection) - return son + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) - def _fix_incoming(self, son, collection): - """Apply manipulators to an incoming SON object before it gets stored. - - :Parameters: - - `son`: the son object going into the database - - `collection`: the collection the son object is being saved in - """ - son = self._apply_incoming_manipulators(son, collection) - son = self._apply_incoming_copying_manipulators(son, collection) - return son - - def _fix_outgoing(self, son, collection): - """Apply manipulators to a SON object as it comes out of the database. - - :Parameters: - - `son`: the son object coming out of the database - - `collection`: the collection the son object was saved in - """ - for manipulator in reversed(self.__outgoing_manipulators): - son = manipulator.transform_outgoing(son, collection) - for manipulator in reversed(self.__outgoing_copying_manipulators): - son = manipulator.transform_outgoing(son, collection) - return son + with self.__client._tmp_session(session) as s: + # Skip this check in a transaction where listCollections is not + # supported. + if ( + check_exists + and (not s or not s.in_transaction) + and name in self.list_collection_names(filter={"name": name}, session=s) + ): + raise CollectionInvalid("collection %s already exists" % name) + return Collection( + self, + name, + True, + codec_options, + read_preference, + write_concern, + read_concern, + session=s, + **kwargs, + ) - def aggregate(self, pipeline, session=None, **kwargs): + def aggregate( + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any + ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. See the `aggregation pipeline`_ documentation for a list of stages that are supported. - Introduced in MongoDB 3.6. - .. code-block:: python # Lists all operations currently running on the server. @@ -461,21 +492,6 @@ def aggregate(self, pipeline, session=None, **kwargs): for operation in cursor: print(operation) - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. 
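
A quick illustration of the ``create_collection`` options documented above, assuming a running local server (the ``timeseries`` call needs MongoDB >= 5.0; names are illustrative)::

    from pymongo import MongoClient
    from pymongo.errors import CollectionInvalid

    db = MongoClient().test

    try:
        # Capped collection: fixed size; the oldest documents roll off first.
        db.create_collection("log", capped=True, size=1024 * 1024, max=1000)
    except CollectionInvalid:
        pass  # already exists (check_exists=True is the default)

    # Time series collection with automatic expiration.
    db.create_collection(
        "metrics",
        timeseries={"timeField": "ts", "metaField": "sensor"},
        expireAfterSeconds=86400,
    )
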
- - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - The :meth:`aggregate` method obeys the :attr:`read_preference` of this :class:`Database`, except when ``$out`` or ``$merge`` are used, in which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` @@ -491,7 +507,27 @@ def aggregate(self, pipeline, session=None, **kwargs): - `pipeline`: a list of aggregation pipeline stages - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. + - `**kwargs` (optional): extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result @@ -500,22 +536,39 @@ def aggregate(self, pipeline, session=None, **kwargs): .. versionadded:: 3.9 .. _aggregation pipeline: - https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline .. 
_aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.client._tmp_session(session, close=False) as s: cmd = _DatabaseAggregationCommand( - self, CommandCursor, pipeline, kwargs, session is not None, - user_fields={'cursor': {'firstBatch': 1}}) + self, + CommandCursor, + pipeline, + kwargs, + session is not None, + user_fields={"cursor": {"firstBatch": 1}}, + ) return self.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(s), s, - retryable=not cmd._performs_write) - - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write # type: ignore[arg-type] + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. Performs an aggregation with an implicit initial ``$changeStream`` @@ -543,14 +596,13 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. code-block:: python try: - with db.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. @@ -561,11 +613,15 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. 
A resume token @@ -586,40 +642,107 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. .. versionadded:: 3.7 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return DatabaseChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def _command(self, sock_info, command, slave_ok=False, value=1, check=True, - allowable_errors=None, read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, - write_concern=None, - parse_write_concern_error=False, session=None, **kwargs): + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... 
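
Since the ``watch`` hunks above span a lot of diff, a compact usage sketch may help; it assumes a replica set or sharded cluster, which change streams require::

    from pymongo import MongoClient

    db = MongoClient().test  # must be a replica set or mongos

    with db.watch([{"$match": {"operationType": "insert"}}]) as stream:
        change = next(stream)  # blocks until the first matching event
        print(change["fullDocument"])
        token = stream.resume_token  # persist this to survive restarts

    # Later: resume exactly after the last event we processed.
    with db.watch(resume_after=token) as stream:
        for change in stream:
            print(change["operationType"])
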
+ + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: """Internal command helper.""" - if isinstance(command, string_type): + if isinstance(command, str): command = SON([(command, value)]) command.update(kwargs) with self.__client._tmp_session(session) as s: - return sock_info.command( + return conn.command( self.__name, command, - slave_ok, read_preference, codec_options, check, @@ -627,17 +750,58 @@ def _command(self, sock_info, command, slave_ok=False, value=1, check=True, write_concern=write_concern, parse_write_concern_error=parse_write_concern_error, session=s, - client=self.__client) - - def command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): + client=self.__client, + ) + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: """Issue a MongoDB command. Send command `command` to the database and return the - response. If `command` is an instance of :class:`basestring` - (:class:`str` in python 3) then the command {`command`: `value`} - will be sent. Otherwise, `command` must be an instance of + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of :class:`dict` and will be sent as is. Any additional keyword arguments will be added to the final @@ -648,10 +812,10 @@ def command(self, command, value=1, check=True, >>> db.command("buildinfo") - For a command where the value matters, like ``{collstats: + For a command where the value matters, like ``{count: collection_name}`` we can do: - >>> db.command("collstats", collection_name) + >>> db.command("count", collection_name) For commands that take additional arguments we can use kwargs. 
So ``{filemd5: object_id, root: file_root}`` becomes: @@ -684,16 +848,25 @@ def command(self, command, value=1, check=True, instance. - `session` (optional): A :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional keyword arguments will be added to the command document before it is sent + .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the - `read_preference` and `codec_options` parameters instead. + ``read_preference`` and ``codec_options`` parameters instead. .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -704,121 +877,234 @@ def command(self, command, value=1, check=True, regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular expression object. - Added the `codec_options` parameter. + Added the ``codec_options`` parameter. - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. + .. seealso:: The MongoDB documentation on `commands `_. + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if comment is not None: + kwargs["comment"] = comment - .. versionchanged:: 2.3 - Added `tag_sets` and `secondary_acceptable_latency_ms` options. - .. versionchanged:: 2.2 - Added support for `as_class` - the class you want to use for - the resulting documents + if read_preference is None: + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + with self.__client._conn_for_reads(read_preference, session) as ( + connection, + read_preference, + ): + return self._command( + connection, + command, + value, + check, + allowable_errors, + read_preference, + opts, + session=session, + **kwargs, + ) + + @_csot.apply + def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + max_await_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + """Issue a MongoDB command and parse the response as a cursor. + + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 + :Parameters: + - `command`: document representing the command to be issued, + or the name of the command (for simple commands only). - .. 
mongodoc:: commands - """ - if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) - with self.__client._socket_for_reads( - read_preference, session) as (sock_info, slave_ok): - return self._command(sock_info, command, slave_ok, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) - - def _retryable_read_command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): - """Same as command but used for retryable read commands.""" - if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. - def _cmd(session, server, sock_info, slave_ok): - return self._command(sock_info, command, slave_ok, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) + - `value` (optional): value to use for the command verb when + `command` is passed as a string + - `read_preference` (optional): The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + - `codec_options`: A :class:`~bson.codec_options.CodecOptions` + instance. + - `session` (optional): A + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to future getMores for this + command. + - `max_await_time_ms` (optional): The number of ms to wait for more data on future getMores for this command. + - `**kwargs` (optional): additional keyword arguments will + be added to the command document before it is sent - return self.__client._retryable_read( - _cmd, read_preference, session) + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. - def _list_collections(self, sock_info, slave_okay, session, - read_preference, **kwargs): - """Internal listCollections helper.""" + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. - coll = self.get_collection( - "$cmd", read_preference=read_preference) - if sock_info.max_wire_version > 2: - cmd = SON([("listCollections", 1), - ("cursor", {})]) - cmd.update(kwargs) - with self.__client._tmp_session( - session, close=False) as tmp_session: - cursor = self._command( - sock_info, cmd, slave_okay, - read_preference=read_preference, - session=tmp_session)["cursor"] - return CommandCursor( - coll, - cursor, - sock_info.address, + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. 
+ """ + with self.__client._tmp_session(session, close=False) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self.__client._conn_for_reads(read_preference, tmp_session) as ( + conn, + read_preference, + ): + response = self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, session=tmp_session, - explicit_session=session is not None) - else: - match = _INDEX_REGEX - if "filter" in kwargs: - match = {"$and": [_INDEX_REGEX, kwargs["filter"]]} - dblen = len(self.name.encode("utf8") + b".") - pipeline = [ - {"$project": {"name": {"$substr": ["$name", dblen, -1]}, - "options": 1}}, - {"$match": match} + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + explicit_session=session is not None, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + session: Optional[ClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return self.__client._retryable_read(_cmd, read_preference, session) + + def _list_collections( + self, + conn: Connection, + session: Optional[ClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + Collection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = SON([("listCollections", 1), ("cursor", {})]) + cmd.update(kwargs) + with self.__client._tmp_session(session, close=False) as tmp_session: + cursor = self._command(conn, cmd, read_preference=read_preference, session=tmp_session)[ + "cursor" ] - cmd = SON([("aggregate", "system.namespaces"), - ("pipeline", pipeline), - ("cursor", kwargs.get("cursor", {}))]) - cursor = self._command(sock_info, cmd, slave_okay)["cursor"] - return CommandCursor(coll, cursor, sock_info.address) - - def list_collections(self, session=None, filter=None, **kwargs): - """Get a cursor over the collectons of this database. + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + explicit_session=session is not None, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + def list_collections( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. 
+         - `comment` (optional): A user-provided comment to attach to this
+           command.
          - `**kwargs` (optional): Optional parameters of the
            `listCollections command
-           <https://docs.mongodb.com/manual/reference/command/listCollections/>`_
+           <https://mongodb.com/docs/manual/reference/command/listCollections/>`_
            can be passed as keyword arguments to this method. The supported
            options differ by server version.

+       :Returns:
+         An instance of :class:`~pymongo.command_cursor.CommandCursor`.

        .. versionadded:: 3.6
        """
        if filter is not None:
-           kwargs['filter'] = filter
-       read_pref = ((session and session._txn_read_preference())
-                    or ReadPreference.PRIMARY)
-
-       def _cmd(session, server, sock_info, slave_okay):
-           return self._list_collections(
-               sock_info, slave_okay, session, read_preference=read_pref,
-               **kwargs)
-
-       return self.__client._retryable_read(
-           _cmd, read_pref, session)
-
-   def list_collection_names(self, session=None, filter=None, **kwargs):
+           kwargs["filter"] = filter
+       read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY
+       if comment is not None:
+           kwargs["comment"] = comment
+
+       def _cmd(
+           session: Optional[ClientSession],
+           _server: Server,
+           conn: Connection,
+           read_preference: _ServerMode,
+       ) -> CommandCursor[MutableMapping[str, Any]]:
+           return self._list_collections(conn, session, read_preference=read_preference, **kwargs)
+
+       return self.__client._retryable_read(_cmd, read_pref, session)
+
+   def list_collection_names(
+       self,
+       session: Optional[ClientSession] = None,
+       filter: Optional[Mapping[str, Any]] = None,
+       comment: Optional[Any] = None,
+       **kwargs: Any,
+   ) -> list[str]:
        """Get a list of all the collection names in this database.

        For example, to list all non-system collections::

@@ -831,19 +1117,25 @@ def list_collection_names(self, session=None, filter=None, **kwargs):
            :class:`~pymongo.client_session.ClientSession`.
          - `filter` (optional): A query document to filter the list of
            collections returned from the listCollections command.
+         - `comment` (optional): A user-provided comment to attach to this
+           command.
          - `**kwargs` (optional): Optional parameters of the
            `listCollections command
-           <https://docs.mongodb.com/manual/reference/command/listCollections/>`_
+           <https://mongodb.com/docs/manual/reference/command/listCollections/>`_
            can be passed as keyword arguments to this method. The supported
            options differ by server version.

+
        .. versionchanged:: 3.8
           Added the ``filter`` and ``**kwargs`` parameters.

        .. versionadded:: 3.6
        """
+       if comment is not None:
+           kwargs["comment"] = comment
        if filter is None:
            kwargs["nameOnly"] = True
+
        else:
            # The enumerate collections spec states
            # that "drivers MUST NOT set
            # nameOnly if a filter specifies any keys other than name."
@@ -852,35 +1144,33 @@ def list_collection_names(self, session=None, filter=None, **kwargs):
            if not filter or (len(filter) == 1 and "name" in filter):
                kwargs["nameOnly"] = True

-       return [result["name"]
-               for result in self.list_collections(session=session, **kwargs)]
-
-   def collection_names(self, include_system_collections=True,
-                        session=None):
-       """**DEPRECATED**: Get a list of all the collection names in this
-       database.
+       return [result["name"] for result in self.list_collections(session=session, **kwargs)]

-       :Parameters:
-         - `include_system_collections` (optional): if ``False`` list
-           will not include system collections (e.g ``system.indexes``)
-         - `session` (optional): a
-           :class:`~pymongo.client_session.ClientSession`.
+   def _drop_helper(
+       self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None
+   ) -> dict[str, Any]:
+       command = SON([("drop", name)])
+       if comment is not None:
+           command["comment"] = comment

-       .. versionchanged:: 3.7
-          Deprecated. Use :meth:`list_collection_names` instead.
-
-       .. 
versionchanged:: 3.6 - Added ``session`` parameter. - """ - warnings.warn("collection_names is deprecated. Use " - "list_collection_names instead.", - DeprecationWarning, stacklevel=2) - kws = {} if include_system_collections else _SYSTEM_FILTER - return [result["name"] - for result in self.list_collections(session=session, - nameOnly=True, **kws)] - - def drop_collection(self, name_or_collection, session=None): + with self.__client._conn_for_writes(session) as connection: + return self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + def drop_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: """Drop a collection. :Parameters: @@ -888,10 +1178,39 @@ def drop_collection(self, name_or_collection, session=None): collection object itself - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation when using - MongoDB >= 3.4. + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. 
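
A hedged usage sketch for the ``comment`` option added above (``client`` and
the collection name are illustrative, not part of this diff)::

    db = client.test
    # Attaches a server-visible comment to the dropCollection command.
    db.drop_collection("my_collection", comment="nightly cleanup")
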
@@ -905,27 +1224,40 @@ def drop_collection(self, name_or_collection, session=None): if isinstance(name, Collection): name = name.name - if not isinstance(name, string_type): - raise TypeError("name_or_collection must be an " - "instance of %s" % (string_type.__name__,)) - - self.__client._purge_index(self.__name, name) - - with self.__client._socket_for_writes(session) as sock_info: - return self._command( - sock_info, 'drop', value=_unicode(name), - allowable_errors=['ns not found'], - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session) - - def validate_collection(self, name_or_collection, - scandata=False, full=False, session=None): + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str") + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) + + def validate_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[ClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if validation fails. + See also the MongoDB documentation on the `validate command`_. + :Parameters: - `name_or_collection`: A Collection object or the name of a collection to validate. @@ -937,36 +1269,50 @@ def validate_collection(self, name_or_collection, documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `background` (optional): A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. + + .. 
_validate command: https://mongodb.com/docs/manual/reference/command/validate/ """ name = name_or_collection if isinstance(name, Collection): name = name.name - if not isinstance(name, string_type): - raise TypeError("name_or_collection must be an instance of " - "%s or Collection" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str or Collection") + cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background - result = self.command("validate", _unicode(name), - scandata=scandata, full=full, session=session) + result = self.command(cmd, session=session) valid = True # Pre 1.9 results if "result" in result: info = result["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) + raise CollectionInvalid(f"{name} invalid: {info}") # Sharded results elif "raw" in result: - for _, res in iteritems(result["raw"]): + for _, res in result["raw"].items(): if "result" in res: info = res["result"] - if (info.find("exception") != -1 or - info.find("corrupt") != -1): - raise CollectionInvalid("%s invalid: " - "%s" % (name, info)) + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") elif not res.get("valid", False): valid = False break @@ -975,517 +1321,32 @@ def validate_collection(self, name_or_collection, valid = False if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) + raise CollectionInvalid(f"{name} invalid: {result!r}") return result - def _current_op(self, include_all=False, session=None): - """Helper for running $currentOp.""" - cmd = SON([("currentOp", 1), ("$all", include_all)]) - with self.__client._socket_for_writes(session) as sock_info: - if sock_info.max_wire_version >= 4: - return self.__client.admin._command( - sock_info, cmd, codec_options=self.codec_options, - session=session) - else: - spec = {"$all": True} if include_all else {} - return _first_batch(sock_info, "admin", "$cmd.sys.inprog", - spec, -1, True, self.codec_options, - ReadPreference.PRIMARY, cmd, - self.client._event_listeners) - - def current_op(self, include_all=False, session=None): - """**DEPRECATED**: Get information on operations currently running. - - Starting with MongoDB 3.6 this helper is obsolete. The functionality - provided by this helper is available in MongoDB 3.6+ using the - `$currentOp aggregation pipeline stage`_, which can be used with - :meth:`aggregate`. Note that, while this helper can only return - a single document limited to a 16MB result, :meth:`aggregate` - returns a cursor avoiding that limitation. - - Users of MongoDB versions older than 3.6 can use the `currentOp command`_ - directly:: - - # MongoDB 3.2 and 3.4 - client.admin.command("currentOp") - - Or query the "inprog" virtual collection:: - - # MongoDB 2.6 and 3.0 - client.admin["$cmd.sys.inprog"].find_one() - - :Parameters: - - `include_all` (optional): if ``True`` also list currently - idle operations in the result - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.9 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. _$currentOp aggregation pipeline stage: https://docs.mongodb.com/manual/reference/operator/aggregation/currentOp/ - .. 
_currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ - """ - warnings.warn("current_op() is deprecated. See the documentation for " - "more information", - DeprecationWarning, stacklevel=2) - return self._current_op(include_all, session) - - def profiling_level(self, session=None): - """Get the database's current profiling level. - - Returns one of (:data:`~pymongo.OFF`, - :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - """ - result = self.command("profile", -1, session=session) - - assert result["was"] >= 0 and result["was"] <= 2 - return result["was"] - - def set_profiling_level(self, level, slow_ms=None, session=None): - """Set the database's profiling level. - - :Parameters: - - `level`: Specifies a profiling level, see list of possible values - below. - - `slow_ms`: Optionally modify the threshold for the profile to - consider a query or operation. Even if the profiler is off queries - slower than the `slow_ms` level will get written to the logs. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - Possible `level` values: - - +----------------------------+------------------------------------+ - | Level | Setting | - +============================+====================================+ - | :data:`~pymongo.OFF` | Off. No profiling. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.ALL` | On. Includes all operations. | - +----------------------------+------------------------------------+ - - Raises :class:`ValueError` if level is not one of - (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, - :data:`~pymongo.ALL`). - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - """ - if not isinstance(level, int) or level < 0 or level > 2: - raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") - - if slow_ms is not None and not isinstance(slow_ms, int): - raise TypeError("slow_ms must be an integer") - - if slow_ms is not None: - self.command("profile", level, slowms=slow_ms, session=session) - else: - self.command("profile", level, session=session) - - def profiling_info(self, session=None): - """Returns a list containing current profiling information. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - """ - return list(self["system.profile"].find(session=session)) - - def error(self): - """**DEPRECATED**: Get the error if one occurred on the last operation. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("Database.error() is deprecated", - DeprecationWarning, stacklevel=2) - - error = self.command("getlasterror") - error_msg = error.get("err", "") - if error_msg is None: - return None - if error_msg.startswith("not master"): - # Reset primary server and request check, if another thread isn't - # doing so already. 
- primary = self.__client.primary - if primary: - self.__client._reset_server_and_request_check(primary) - return error - - def last_status(self): - """**DEPRECATED**: Get status information from the last operation. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Returns a SON object with status information. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("last_status() is deprecated", - DeprecationWarning, stacklevel=2) - - return self.command("getlasterror") - - def previous_error(self): - """**DEPRECATED**: Get the most recent error on this database. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Only returns errors that have occurred since the last call to - :meth:`reset_error_history`. Returns None if no such errors have - occurred. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("previous_error() is deprecated", - DeprecationWarning, stacklevel=2) + # See PYTHON-3084. + __iter__ = None - error = self.command("getpreverror") - if error.get("err", 0) is None: - return None - return error - - def reset_error_history(self): - """**DEPRECATED**: Reset the error history of this database. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Calls to :meth:`previous_error` will only return errors that have - occurred since the most recent call to this method. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("reset_error_history() is deprecated", - DeprecationWarning, stacklevel=2) - - self.command("reseterror") - - def __iter__(self): - return self - - def __next__(self): + def __next__(self) -> NoReturn: raise TypeError("'Database' object is not iterable") next = __next__ - def _default_role(self, read_only): - """Return the default user role for this database.""" - if self.name == "admin": - if read_only: - return "readAnyDatabase" - else: - return "root" - else: - if read_only: - return "read" - else: - return "dbOwner" - - def _create_or_update_user( - self, create, name, password, read_only, session=None, **kwargs): - """Use a command to create (if create=True) or modify a user. - """ - opts = {} - if read_only or (create and "roles" not in kwargs): - warnings.warn("Creating a user with the read_only option " - "or without roles is deprecated in MongoDB " - ">= 2.6", DeprecationWarning) - - opts["roles"] = [self._default_role(read_only)] - - if read_only: - warnings.warn("The read_only option is deprecated in MongoDB " - ">= 2.6, use 'roles' instead", DeprecationWarning) - - if password is not None: - if "digestPassword" in kwargs: - raise ConfigurationError("The digestPassword option is not " - "supported via add_user. Please use " - "db.command('createUser', ...) " - "instead for this option.") - opts["pwd"] = password - - # Don't send {} as writeConcern. - if self.write_concern.acknowledged and self.write_concern.document: - opts["writeConcern"] = self.write_concern.document - opts.update(kwargs) - - if create: - command_name = "createUser" - else: - command_name = "updateUser" - - self.command(command_name, name, session=session, **opts) - - def add_user(self, name, password=None, read_only=None, session=None, - **kwargs): - """**DEPRECATED**: Create user `name` with password `password`. 
- - Add a new user with permissions for this :class:`Database`. - - .. note:: Will change the password if user `name` already exists. - - .. note:: add_user is deprecated and will be removed in PyMongo - 4.0. Starting with MongoDB 2.6 user management is handled with four - database commands, createUser_, usersInfo_, updateUser_, and - dropUser_. - - To create a user:: - - db.command("createUser", "admin", pwd="password", roles=["root"]) - - To create a read-only user:: - - db.command("createUser", "user", pwd="password", roles=["read"]) - - To change a password:: - - db.command("updateUser", "user", pwd="newpassword") - - Or change roles:: - - db.command("updateUser", "user", roles=["readWrite"]) - - .. _createUser: https://docs.mongodb.com/manual/reference/command/createUser/ - .. _usersInfo: https://docs.mongodb.com/manual/reference/command/usersInfo/ - .. _updateUser: https://docs.mongodb.com/manual/reference/command/updateUser/ - .. _dropUser: https://docs.mongodb.com/manual/reference/command/createUser/ - - .. warning:: Never create or modify users over an insecure network without - the use of TLS. See :doc:`/examples/tls` for more information. - - :Parameters: - - `name`: the name of the user to create - - `password` (optional): the password of the user to create. Can not - be used with the ``userSource`` argument. - - `read_only` (optional): if ``True`` the user will be read only - - `**kwargs` (optional): optional fields for the user document - (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See - ``_ - for more information. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.7 - Added support for SCRAM-SHA-256 users with MongoDB 4.0 and later. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Deprecated add_user. - - .. versionchanged:: 2.5 - Added kwargs support for optional fields introduced in MongoDB 2.4 - - .. versionchanged:: 2.2 - Added support for read only users - """ - warnings.warn("add_user is deprecated and will be removed in PyMongo " - "4.0. Use db.command with createUser or updateUser " - "instead", DeprecationWarning, stacklevel=2) - if not isinstance(name, string_type): - raise TypeError("name must be an " - "instance of %s" % (string_type.__name__,)) - if password is not None: - if not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) - if len(password) == 0: - raise ValueError("password can't be empty") - if read_only is not None: - read_only = common.validate_boolean('read_only', read_only) - if 'roles' in kwargs: - raise ConfigurationError("Can not use " - "read_only and roles together") - - try: - uinfo = self.command("usersInfo", name, session=session) - # Create the user if not found in uinfo, otherwise update one. - self._create_or_update_user( - (not uinfo["users"]), name, password, read_only, - session=session, **kwargs) - except OperationFailure as exc: - # Unauthorized. Attempt to create the user in case of - # localhost exception. - if exc.code == 13: - self._create_or_update_user( - True, name, password, read_only, session=session, **kwargs) - else: - raise - - def remove_user(self, name, session=None): - """**DEPRECATED**: Remove user `name` from this :class:`Database`. - - User `name` will no longer have permissions to access this - :class:`Database`. - - .. note:: remove_user is deprecated and will be removed in PyMongo - 4.0. 
Use the dropUser command instead:: - - db.command("dropUser", "user") - - :Parameters: - - `name`: the name of the user to remove - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Deprecated remove_user. - """ - warnings.warn("remove_user is deprecated and will be removed in " - "PyMongo 4.0. Use db.command with dropUser " - "instead", DeprecationWarning, stacklevel=2) - cmd = SON([("dropUser", name)]) - # Don't send {} as writeConcern. - if self.write_concern.acknowledged and self.write_concern.document: - cmd["writeConcern"] = self.write_concern.document - self.command(cmd, session=session) - - def authenticate(self, name=None, password=None, - source=None, mechanism='DEFAULT', **kwargs): - """**DEPRECATED**: Authenticate to use this database. - - .. warning:: Starting in MongoDB 3.6, calling :meth:`authenticate` - invalidates all existing cursors. It may also leave logical sessions - open on the server for up to 30 minutes until they time out. - - Authentication lasts for the life of the underlying client - instance, or until :meth:`logout` is called. - - Raises :class:`TypeError` if (required) `name`, (optional) `password`, - or (optional) `source` is not an instance of :class:`basestring` - (:class:`str` in python 3). - - .. note:: - - This method authenticates the current connection, and - will also cause all new :class:`~socket.socket` connections - in the underlying client instance to be authenticated automatically. - - - Authenticating more than once on the same database with different - credentials is not supported. You must call :meth:`logout` before - authenticating with new credentials. - - - When sharing a client instance between multiple threads, all - threads will share the authentication. If you need different - authentication profiles for different purposes you must use - distinct client instances. - - :Parameters: - - `name`: the name of the user to authenticate. Optional when - `mechanism` is MONGODB-X509 and the MongoDB server version is - >= 3.4. - - `password` (optional): the password of the user to authenticate. - Not used with GSSAPI or MONGODB-X509 authentication. - - `source` (optional): the database to authenticate on. If not - specified the current database is used. - - `mechanism` (optional): See :data:`~pymongo.auth.MECHANISMS` for - options. If no mechanism is specified, PyMongo automatically uses - MONGODB-CR when connected to a pre-3.0 version of MongoDB, - SCRAM-SHA-1 when connected to MongoDB 3.0 through 3.6, and - negotiates the mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) when - connected to MongoDB 4.0+. - - `authMechanismProperties` (optional): Used to specify - authentication mechanism specific options. To specify the service - name for GSSAPI authentication pass - authMechanismProperties='SERVICE_NAME:' - - .. versionchanged:: 3.7 - Added support for SCRAM-SHA-256 with MongoDB 4.0 and later. - - .. versionchanged:: 3.5 - Deprecated. Authenticating multiple users conflicts with support for - logical sessions in MongoDB 3.6. To authenticate as multiple users, - create multiple instances of MongoClient. - - .. versionadded:: 2.8 - Use SCRAM-SHA-1 with MongoDB 3.0 and later. - - .. versionchanged:: 2.5 - Added the `source` and `mechanism` parameters. :meth:`authenticate` - now raises a subclass of :class:`~pymongo.errors.PyMongoError` if - authentication fails due to invalid credentials or configuration - issues. - - .. 
mongodoc:: authenticate - """ - if name is not None and not isinstance(name, string_type): - raise TypeError("name must be an " - "instance of %s" % (string_type.__name__,)) - if password is not None and not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) - if source is not None and not isinstance(source, string_type): - raise TypeError("source must be an " - "instance of %s" % (string_type.__name__,)) - common.validate_auth_mechanism('mechanism', mechanism) - - validated_options = {} - for option, value in iteritems(kwargs): - normalized, val = common.validate_auth_option(option, value) - validated_options[normalized] = val - - credentials = auth._build_credentials_tuple( - mechanism, - source, - name, - password, - validated_options, - self.name) - - self.client._cache_credentials( - self.name, - credentials, - connect=True) - - return True - - def logout(self): - """**DEPRECATED**: Deauthorize use of this database. - - .. warning:: Starting in MongoDB 3.6, calling :meth:`logout` - invalidates all existing cursors. It may also leave logical sessions - open on the server for up to 30 minutes until they time out. - """ - warnings.warn("Database.logout() is deprecated", - DeprecationWarning, stacklevel=2) - - # Sockets will be deauthenticated as they are used. - self.client._purge_credentials(self.name) - - def dereference(self, dbref, session=None, **kwargs): + def __bool__(self) -> NoReturn: + raise NotImplementedError( + "Database objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def dereference( + self, + dbref: DBRef, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. @@ -1499,92 +1360,25 @@ def dereference(self, dbref, session=None, **kwargs): - `dbref`: the reference - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`~pymongo.collection.Collection.find`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. """ if not isinstance(dbref, DBRef): raise TypeError("cannot dereference a %s" % type(dbref)) if dbref.database is not None and dbref.database != self.__name: - raise ValueError("trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, - self.__name)) + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self.__name!r})" + ) return self[dbref.collection].find_one( - {"_id": dbref.id}, session=session, **kwargs) - - def eval(self, code, *args): - """**DEPRECATED**: Evaluate a JavaScript expression in MongoDB. - - :Parameters: - - `code`: string representation of JavaScript code to be - evaluated - - `args` (optional): additional positional arguments are - passed to the `code` being evaluated - - .. warning:: the eval command is deprecated in MongoDB 3.0 and - will be removed in a future server version. 
- """ - warnings.warn("Database.eval() is deprecated", - DeprecationWarning, stacklevel=2) - - if not isinstance(code, Code): - code = Code(code) - - result = self.command("$eval", code, args=args) - return result.get("retval", None) - - def __call__(self, *args, **kwargs): - """This is only here so that some API misusages are easier to debug. - """ - raise TypeError("'Database' object is not callable. If you meant to " - "call the '%s' method on a '%s' object it is " - "failing because no such method exists." % ( - self.__name, self.__client.__class__.__name__)) - - -class SystemJS(object): - """**DEPRECATED**: Helper class for dealing with stored JavaScript. - """ - - def __init__(self, database): - """**DEPRECATED**: Get a system js helper for the database `database`. - - SystemJS will be removed in PyMongo 4.0. - """ - warnings.warn("SystemJS is deprecated", - DeprecationWarning, stacklevel=2) - - if not database.write_concern.acknowledged: - database = database.client.get_database( - database.name, write_concern=DEFAULT_WRITE_CONCERN) - # can't just assign it since we've overridden __setattr__ - object.__setattr__(self, "_db", database) - - def __setattr__(self, name, code): - self._db.system.js.replace_one( - {"_id": name}, {"_id": name, "value": Code(code)}, True) - - def __setitem__(self, name, code): - self.__setattr__(name, code) - - def __delattr__(self, name): - self._db.system.js.delete_one({"_id": name}) - - def __delitem__(self, name): - self.__delattr__(name) - - def __getattr__(self, name): - return lambda *args: self._db.eval(Code("function() { " - "return this[name].apply(" - "this, arguments); }", - scope={'name': name}), *args) - - def __getitem__(self, name): - return self.__getattr__(name) - - def list(self): - """Get a list of the names of the functions stored in this database.""" - return [x["_id"] for x in self._db.system.js.find(projection=["_id"])] + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 1f5235aca7..9e7cfbda33 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -13,13 +13,13 @@ # permissions and limitations under the License. """Advanced options for MongoDB drivers implemented on top of PyMongo.""" +from __future__ import annotations from collections import namedtuple +from typing import Optional -from bson.py3compat import string_type - -class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): +class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): """Info about a driver wrapping PyMongo. The MongoDB server logs PyMongo's name, version, and platform whenever @@ -28,12 +28,15 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. 
""" - def __new__(cls, name=None, version=None, platform=None): - self = super(DriverInfo, cls).__new__(cls, name, version, platform) - for name, value in self._asdict().items(): - if value is not None and not isinstance(value, string_type): - raise TypeError("Wrong type for DriverInfo %s option, value " - "must be an instance of %s" % ( - name, string_type.__name__)) + + def __new__( + cls, name: str, version: Optional[str] = None, platform: Optional[str] = None + ) -> DriverInfo: + self = super().__new__(cls, name, version, platform) + for key, value in self._asdict().items(): + if value is not None and not isinstance(value, str): + raise TypeError( + f"Wrong type for DriverInfo {key} option, value must be an instance of str" + ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f71cd48b6d..cdaf2358d2 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -13,12 +13,24 @@ # limitations under the License. """Support for explicit client-side field level encryption.""" +from __future__ import annotations import contextlib -import os -import subprocess -import uuid +import enum +import socket import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + cast, +) try: from pymongocrypt.auto_encrypter import AutoEncrypter @@ -26,48 +38,62 @@ from pymongocrypt.explicit_encrypter import ExplicitEncrypter from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False MongoCryptCallback = object from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary from bson.codec_options import CodecOptions -from bson.binary import (Binary, - STANDARD, - UUID_SUBTYPE) from bson.errors import BSONError -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, - RawBSONDocument, - _inflate_bson) +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON - -from pymongo.errors import (ConfigurationError, - EncryptionError, - InvalidOperation, - ServerSelectionTimeoutError) +from pymongo import _csot +from pymongo.collection import Collection +from pymongo.common import CONNECT_TIMEOUT +from pymongo.cursor import Cursor +from pymongo.daemon import _spawn_daemon +from pymongo.database import Database +from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + PyMongoError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient -from pymongo.pool import _configured_socket, PoolOptions +from pymongo.network import BLOCKING_IO_ERRORS +from pymongo.operations import UpdateOne +from pymongo.pool import PoolOptions, _configured_socket, _raise_connection_failure from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context +from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern -from pymongo.daemon import _spawn_daemon +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext _HTTPS_PORT = 443 -_KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. 
-_MONGOCRYPTD_TIMEOUT_MS = 1000 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + -_DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) +_DATA_KEY_OPTS: CodecOptions[SON[str, Any]] = CodecOptions( + document_class=SON[str, Any], uuid_representation=STANDARD +) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, - uuid_representation=STANDARD) +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) @contextlib.contextmanager -def _wrap_encryption_errors(): +def _wrap_encryption_errors() -> Iterator[None]: """Context manager to wrap encryption related errors.""" try: yield @@ -76,26 +102,37 @@ def _wrap_encryption_errors(): # we should propagate them unchanged. raise except Exception as exc: - raise EncryptionError(exc) + raise EncryptionError(exc) from None -class _EncryptionIO(MongoCryptCallback): - def __init__(self, client, key_vault_coll, mongocryptd_client, opts): +class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[MongoClient[_DocumentTypeArg]], + key_vault_coll: Collection[_DocumentTypeArg], + mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any # Use a weak ref to break reference cycle. if client is not None: self.client_ref = weakref.ref(client) else: self.client_ref = None - self.key_vault_coll = key_vault_coll.with_options( - codec_options=_KEY_VAULT_OPTS, - read_concern=ReadConcern(level='majority'), - write_concern=WriteConcern(w='majority')) + self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast( + Collection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False - def kms_request(self, kms_context): + def kms_request(self, kms_context: MongoCryptKmsContext) -> None: """Complete a KMS request. :Parameters: @@ -106,21 +143,52 @@ def kms_request(self, kms_context): """ endpoint = kms_context.endpoint message = kms_context.message + provider = kms_context.kms_provider + ctx = self.opts._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + ) # disable_ocsp_endpoint_check + # CSOT: set timeout for socket creation. 
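+            # _csot.clamp_remaining() returns the smaller of the requested
+            # timeout and the time remaining in the operation's CSOT budget;
+            # the 0.001 floor below keeps the socket timeout positive.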
+ connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) host, port = parse_host(endpoint, _HTTPS_PORT) - ctx = get_ssl_context(None, None, None, None, None, None, True) - opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, - socket_timeout=_KMS_CONNECT_TIMEOUT, - ssl_context=ctx) - conn = _configured_socket((host, port), opts) try: - conn.sendall(message) - while kms_context.bytes_needed > 0: - data = conn.recv(kms_context.bytes_needed) - kms_context.feed(data) - finally: - conn.close() - - def collection_info(self, database, filter): + conn = _configured_socket((host, port), opts) + try: + conn.sendall(message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = conn.recv(kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") from None + finally: + conn.close() + except (PyMongoError, MongoCryptError): + raise # Propagate pymongo errors directly. + except Exception as error: + # Wrap I/O errors in PyMongo exceptions. + _raise_connection_failure((host, port), error) + + def collection_info( + self, database: Database[Mapping[str, Any]], filter: bytes + ) -> Optional[bytes]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -133,23 +201,23 @@ def collection_info(self, database, filter): :Returns: The first document from the listCollections command response as BSON. """ - with self.client_ref()[database].list_collections( - filter=RawBSONDocument(filter)) as cursor: + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) + return None - def spawn(self): + def spawn(self) -> None: """Spawn mongocryptd. Note this method is thread safe; at most one mongocryptd will start successfully. """ self._spawned = True - args = [self.opts._mongocryptd_spawn_path or 'mongocryptd'] + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] args.extend(self.opts._mongocryptd_spawn_args) _spawn_daemon(args) - def mark_command(self, database, cmd): + def mark_command(self, database: str, cmd: bytes) -> bytes: """Mark a command for encryption. :Parameters: @@ -164,20 +232,21 @@ def mark_command(self, database, cmd): # Database.command only supports mutable mappings so we need to decode # the raw BSON command first. inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None try: res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) except ServerSelectionTimeoutError: if self.opts._mongocryptd_bypass_spawn: raise self.spawn() res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) return res.raw - def fetch_keys(self, filter): + def fetch_keys(self, filter: bytes) -> Iterator[bytes]: """Yields one or more keys from the key vault. :Parameters: @@ -186,11 +255,12 @@ def fetch_keys(self, filter): :Returns: A generator which yields the requested keys from the key vault. 
""" + assert self.key_vault_coll is not None with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: for key in cursor: yield key.raw - def insert_data_key(self, data_key): + def insert_data_key(self, data_key: bytes) -> Binary: """Insert a data key into the key vault. :Parameters: @@ -199,15 +269,16 @@ def insert_data_key(self, data_key): :Returns: The _id of the inserted data key document. """ - raw_doc = RawBSONDocument(data_key) - data_key_id = raw_doc.get('_id') - if not isinstance(data_key_id, uuid.UUID): - raise TypeError('data_key _id must be a UUID') + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError("data_key _id must be Binary with a UUID subtype") + assert self.key_vault_coll is not None self.key_vault_coll.insert_one(raw_doc) - return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) + return data_key_id - def bson_encode(self, doc): + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). @@ -220,7 +291,7 @@ def bson_encode(self, doc): """ return encode(doc) - def close(self): + def close(self) -> None: """Release resources. Note it is not safe to call this method from __del__ or any GC hooks. @@ -232,53 +303,123 @@ def close(self): self.mongocryptd_client = None -class _Encrypter(object): - def __init__(self, io_callbacks, opts): - """Encrypts and decrypts MongoDB commands. +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. - This class is used to support automatic encryption and decryption of - MongoDB commands. + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. :Parameters: - - `io_callbacks`: A :class:`MongoCryptCallback`. + - `client`: The encrypted MongoClient. - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. 
""" if opts._schema_map is None: schema_map = None else: schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) - self._auto_encrypter = AutoEncrypter(io_callbacks, MongoCryptOptions( - opts._kms_providers, schema_map)) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] + ) -> MongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: MongoClient[Mapping[str, Any]] = MongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( + metadata_client, key_vault_coll, mongocryptd_client, opts + ) # type:ignore[misc] + self._auto_encrypter = AutoEncrypter( + io_callbacks, + MongoCryptOptions( + opts._kms_providers, + schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + ), + ) self._closed = False - def encrypt(self, database, cmd, check_keys, codec_options): + def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> MutableMapping[str, Any]: """Encrypt a MongoDB command. :Parameters: - `database`: The database for this command. - `cmd`: A command document. - - `check_keys`: If True, check `cmd` for invalid keys. - `codec_options`: The CodecOptions to use while encoding `cmd`. :Returns: The encrypted command to execute. """ self._check_closed() - # Workaround for $clusterTime which is incompatible with - # check_keys. - cluster_time = check_keys and cmd.pop('$clusterTime', None) - encoded_cmd = _dict_to_bson(cmd, check_keys, codec_options) + encoded_cmd = _dict_to_bson(cmd, False, codec_options) with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. - encrypt_cmd = _inflate_bson( - encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - if cluster_time: - encrypt_cmd['$clusterTime'] = cluster_time - return encrypt_cmd + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - def decrypt(self, response): + def decrypt(self, response: bytes) -> Optional[bytes]: """Decrypt a MongoDB command response. 
:Parameters: @@ -289,54 +430,76 @@ def decrypt(self, response): """ self._check_closed() with _wrap_encryption_errors(): - return self._auto_encrypter.decrypt(response) + return cast(bytes, self._auto_encrypter.decrypt(response)) - def _check_closed(self): + def _check_closed(self) -> None: if self._closed: raise InvalidOperation("Cannot use MongoClient after close") - def close(self): + def close(self) -> None: """Cleanup resources.""" self._closed = True self._auto_encrypter.close() + if self._internal_client: + self._internal_client.close() + self._internal_client = None - @staticmethod - def create(client, opts): - """Create a _CommandEncyptor for a client. - :Parameters: - - `client`: The encrypted MongoClient. - - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" - :Returns: - A :class:`_CommandEncrypter` for this client. - """ - key_vault_client = opts._key_vault_client or client - db, coll = opts._key_vault_namespace.split('.', 1) - key_vault_coll = key_vault_client[db][coll] + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. - mongocryptd_client = MongoClient( - opts._mongocryptd_uri, connect=False, - serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS) + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. - io_callbacks = _EncryptionIO( - client, key_vault_coll, mongocryptd_client, opts) - return _Encrypter(io_callbacks, opts) + .. versionadded:: 4.2 + """ + RANGEPREVIEW = "RangePreview" + """RangePreview. + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. -class Algorithm(object): - """An enum that defines the supported encryption algorithms.""" - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") - AEAD_AES_256_CBC_HMAC_SHA_512_Random = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Random") + .. versionadded:: 4.4 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" -class ClientEncryption(object): + RANGEPREVIEW = "rangePreview" + """Used to encrypt a value for a range query. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. +""" + + +class ClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - codec_options): + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + ) -> None: """Explicit client-side field level encryption. The ClientEncryption class encapsulates explicit operations on a key @@ -351,16 +514,30 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, See :ref:`explicit-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. 
Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used - to generate KMS messages. - - `local`: Map with "key" as a 96-byte array or string. "key" - is the master key used to encrypt/decrypt data keys. This key - should be generated and stored as securely as possible. + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. - `key_vault_namespace`: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption @@ -375,6 +552,20 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, should be the same CodecOptions instance configured on the MongoClient, Database, or Collection used to access application data. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. 
versionadded:: 3.9 """ @@ -382,35 +573,135 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, raise ConfigurationError( "client-side field level encryption requires the pymongocrypt " "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") + "python -m pip install 'pymongo[encryption]'" + ) if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client self._codec_options = codec_options - db, coll = key_vault_namespace.split('.', 1) + db, coll = key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] - self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, None) + opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + ) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None)) + self._io_callbacks, MongoCryptOptions(kms_providers, None) + ) + # Use the same key vault collection as the callback. + assert self._io_callbacks.key_vault_coll is not None + self._key_vault_coll = self._io_callbacks.key_vault_coll + + def create_encrypted_collection( + self, + database: Database[_DocumentTypeArg], + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> tuple[Collection[_DocumentTypeArg], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :Parameters: + - `name`: the name of the collection to create + - `encrypted_fields` (dict): Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + The "keyId" may be set to ``None`` to auto-generate the data keys. + - `kms_provider` (optional): the KMS provider to be used + - `master_key` (optional): Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. 
+ + :Raises: + - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create - def create_data_key(self, kms_provider, master_key=None, - key_alt_names=None): + """ + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: """Create and insert a new data key into the key vault collection. :Parameters: - `kms_provider`: The KMS provider to use. Supported values are - "aws" and "local". + "aws", "azure", "gcp", "kmip", and "local". - `master_key`: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is - not applicable and may be omitted. If the `kms_provider` is "aws" - it is required and has the following fields:: + not applicable and may be omitted. + + If the `kms_provider` is "aws" it is required and has the + following fields:: - `region` (string): Required. The AWS region, e.g. "us-east-1". - `key` (string): Required. The Amazon Resource Name (ARN) to @@ -419,29 +710,119 @@ def create_data_key(self, kms_provider, master_key=None, requests to. May include port number, e.g. "kms.us-east-1.amazonaws.com:443". + If the `kms_provider` is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. `keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `key_alt_names` (optional): An optional list of string alternate names used to reference a key. 
If a key is created with alternate names, then encryption may refer to the key by the unique alternate name instead of by ``key_id``. The following example shows creating and referring to a data key by alternate name:: - client_encryption.create_data_key("local", keyAltNames=["name1"]) + client_encryption.create_data_key("local", key_alt_names=["name1"]) # reference the key with the alternate name - client_encryption.encrypt("457-55-5462", keyAltName="name1", - algorithm=Algorithm.Random) + client_encryption.encrypt("457-55-5462", key_alt_name="name1", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + - `key_material` (optional): Sets the custom key material to be used + by the data key for encryption and decryption. :Returns: The ``_id`` of the created data key document as a :class:`~bson.binary.Binary` with subtype :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. """ self._check_closed() with _wrap_encryption_errors(): - return self._encryption.create_data_key( - kms_provider, master_key=master_key, - key_alt_names=key_alt_names) - - def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): + return cast( + Binary, + self._encryption.create_data_key( + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ), + ) + + def _encrypt_helper( + self, + value: Any, + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + is_expression: bool = False, + ) -> Any: + self._check_closed() + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + range_opts_bytes = None + if range_opts: + range_opts_bytes = encode( + range_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts_bytes, + is_expression=is_expression, + ) + return decode(encrypted_doc)["v"] + + def encrypt( + self, + value: Any, + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> Binary: """Encrypt a BSON value with a given key and algorithm. Note that exactly one of ``key_id`` or ``key_alt_name`` must be @@ -455,24 +836,84 @@ def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + - `range_opts`: Experimental only, not intended for public use. :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.2 + Added the `query_type` and `contention_factor` parameters. 
""" - self._check_closed() - if (key_id is not None and not ( - isinstance(key_id, Binary) and - key_id.subtype == UUID_SUBTYPE)): - raise TypeError( - 'key_id must be a bson.binary.Binary with subtype 4') + return cast( + Binary, + self._encrypt_helper( + value=value, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=False, + ), + ) + + def encrypt_expression( + self, + expression: Mapping[str, Any], + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> RawBSONDocument: + """Encrypt a BSON expression with a given key and algorithm. - doc = encode({'v': value}, codec_options=self._codec_options) - with _wrap_encryption_errors(): - encrypted_doc = self._encryption.encrypt( - doc, algorithm, key_id=key_id, key_alt_name=key_alt_name) - return decode(encrypted_doc)['v'] + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :Parameters: + - `expression`: The BSON aggregate or match expression to encrypt. + - `algorithm` (string): The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + - `key_id`: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `query_type` (str): The query type to execute. See + :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + - `range_opts`: Experimental only, not intended for public use. + + :Returns: + The encrypted expression, a :class:`~bson.RawBSONDocument`. - def decrypt(self, value): + .. versionadded:: 4.4 + """ + return cast( + RawBSONDocument, + self._encrypt_helper( + value=expression, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=True, + ), + ) + + def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. :Parameters: @@ -484,26 +925,203 @@ def decrypt(self, value): """ self._check_closed() if not (isinstance(value, Binary) and value.subtype == 6): - raise TypeError( - 'value to decrypt must be a bson.binary.Binary with subtype 6') + raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") with _wrap_encryption_errors(): - doc = encode({'v': value}) + doc = encode({"v": value}) decrypted_doc = self._encryption.decrypt(doc) - return decode(decrypted_doc, - codec_options=self._codec_options)['v'] + return decode(decrypted_doc, codec_options=self._codec_options)["v"] + + def get_key(self, id: Binary) -> Optional[RawBSONDocument]: + """Get a data key by id. + + :Parameters: + - `id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"_id": id}) + + def get_keys(self) -> Cursor[RawBSONDocument]: + """Get all of the data keys. 
+ + :Returns: + An instance of :class:`~pymongo.cursor.Cursor` over the data key + documents. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find({}) + + def delete_key(self, id: Binary) -> DeleteResult: + """Delete a key document in the key vault collection that has the given ``id``. + + :Parameters: + - `id` (Binary): The UUID of a key which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The delete result. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.delete_one({"_id": id}) - def __enter__(self): + def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: + """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``id``. + + :Parameters: + - ``id``: The UUID of a key which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to add. + + :Returns: + The previous version of the key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + update = {"$addToSet": {"keyAltNames": key_alt_name}} + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, update) + + def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: + """Get a key document in the key vault collection that has the given ``key_alt_name``. + + :Parameters: + - `key_alt_name` (str): The key alternate name of the key to get. + + :Returns: + The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) + + def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: + """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. + + Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. + + :Parameters: + - ``id``: The UUID of a key which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to remove. + + :Returns: + Returns the previous version of the key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :Parameters: + - `filter`: A document used to filter the data keys. + - `provider`: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + - ``master_key``: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :Returns: + A :class:`RewrapManyDataKeyResult`.
+ + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + + .. versionadded:: 4.2 + """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + assert self._key_vault_coll is not None + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + + def __enter__(self) -> ClientEncryption[_DocumentType]: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def _check_closed(self): + def _check_closed(self) -> None: if self._encryption is None: raise InvalidOperation("Cannot use closed ClientEncryption") - def close(self): + def close(self) -> None: """Release resources. Note that using this class in a with-statement will automatically call diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 3158b3e84d..61480467a3 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -13,28 +13,46 @@ # limitations under the License. 
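The ClientEncryption surface added above (data-key creation, explicit encrypt/decrypt, the key-vault management helpers, and rewrapping) composes into a short round trip. A minimal sketch, assuming pymongocrypt is installed and a test mongod is reachable on localhost; the key material, namespace, and alternate names are illustrative only::

    import os

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.encryption import Algorithm, ClientEncryption
    from pymongo.errors import EncryptionError

    client = MongoClient()  # assumed local test deployment
    # "local" KMS provider: a 96-byte master key, per the docs above.
    kms_providers = {"local": {"key": os.urandom(96)}}
    client_encryption = ClientEncryption(
        kms_providers, "keyvault.datakeys", client, CodecOptions()
    )
    try:
        key_id = client_encryption.create_data_key("local", key_alt_names=["demo"])
        encrypted = client_encryption.encrypt(
            "457-55-5462",
            Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
            key_id=key_id,
        )
        assert client_encryption.decrypt(encrypted) == "457-55-5462"
        # Key-vault management helpers from this change set:
        client_encryption.add_key_alt_name(key_id, "demo2")
        assert client_encryption.get_key_by_alt_name("demo2") is not None
        client_encryption.remove_key_alt_name(key_id, "demo2")
        client_encryption.rewrap_many_data_key({})  # re-wrap with the current provider
        client_encryption.delete_key(key_id)
    except EncryptionError as exc:
        # The new ``timeout`` property distinguishes timeout-caused failures.
        print("encryption failed, timeout:", exc.timeout)
    finally:
        client_encryption.close()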
"""Support for automatic client-side field level encryption.""" +from __future__ import annotations -import copy +from typing import TYPE_CHECKING, Any, Mapping, Optional try: - import pymongocrypt + import pymongocrypt # noqa: F401 + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False - +from bson import int64 +from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError +from pymongo.uri_parser import _parse_kms_tls_options +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + from pymongo.typings import _DocumentTypeArg -class AutoEncryptionOpts(object): + +class AutoEncryptionOpts: """Options to configure automatic client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, - key_vault_client=None, schema_map=None, - bypass_auto_encryption=False, - mongocryptd_uri='mongodb://localhost:27020', - mongocryptd_bypass_spawn=False, - mongocryptd_spawn_path='mongocryptd', - mongocryptd_spawn_args=None): + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: Optional[MongoClient[_DocumentTypeArg]] = None, + schema_map: Optional[Mapping[str, Any]] = None, + bypass_auto_encryption: bool = False, + mongocryptd_uri: str = "mongodb://localhost:27020", + mongocryptd_bypass_spawn: bool = False, + mongocryptd_spawn_path: str = "mongocryptd", + mongocryptd_spawn_args: Optional[list[str]] = None, + kms_tls_options: Optional[Mapping[str, Any]] = None, + crypt_shared_lib_path: Optional[str] = None, + crypt_shared_lib_required: bool = False, + bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping[str, Any]] = None, + ) -> None: """Options to configure automatic client-side field level encryption. Automatic client-side field level encryption requires MongoDB 4.2 @@ -52,16 +70,30 @@ def __init__(self, kms_providers, key_vault_namespace, See :ref:`automatic-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used - to generate KMS messages. - - `local`: Map with "key" as a 96-byte array or string. "key" - is the master key used to encrypt/decrypt data keys. This key - should be generated and stored as securely as possible. + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. 
"key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. - `key_vault_namespace`: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption @@ -104,6 +136,53 @@ def __init__(self, kms_providers, key_vault_namespace, ``['--idleShutdownTimeoutSecs=60']``. If the list does not include the ``idleShutdownTimeoutSecs`` option then ``'--idleShutdownTimeoutSecs=60'`` will be added. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. + - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is + unable to load the crypt_shared library. + - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis + of outgoing commands. Set `bypass_query_analysis` to use explicit + encryption on indexed fields without the MongoDB Enterprise Advanced + licensed crypt_shared library. + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents + that described the encrypted fields for Queryable Encryption. For example:: + + { + "db.encryptedCollection": { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + } + + .. versionchanged:: 4.2 + Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, + and `bypass_query_analysis` parameters. + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. 
versionadded:: 3.9 """ @@ -111,8 +190,14 @@ def __init__(self, kms_providers, key_vault_namespace, raise ConfigurationError( "client side encryption requires the pymongocrypt library: " "install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") - + "python -m pip install 'pymongo[encryption]'" + ) + if encrypted_fields_map: + validate_is_mapping("encrypted_fields_map", encrypted_fields_map) + self._encrypted_fields_map = encrypted_fields_map + self._bypass_query_analysis = bypass_query_analysis + self._crypt_shared_lib_path = crypt_shared_lib_path + self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client @@ -121,21 +206,54 @@ def __init__(self, kms_providers, key_vault_namespace, self._mongocryptd_uri = mongocryptd_uri self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn self._mongocryptd_spawn_path = mongocryptd_spawn_path - self._mongocryptd_spawn_args = (copy.copy(mongocryptd_spawn_args) or - ['--idleShutdownTimeoutSecs=60']) + if mongocryptd_spawn_args is None: + mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] + self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError('mongocryptd_spawn_args must be a list') - if not any('idleShutdownTimeoutSecs' in s - for s in self._mongocryptd_spawn_args): - self._mongocryptd_spawn_args.append('--idleShutdownTimeoutSecs=60') + raise TypeError("mongocryptd_spawn_args must be a list") + if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): + self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") + # Maps KMS provider name to an SSLContext. + self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + + +class RangeOpts: + """Options to configure encrypted queries using the rangePreview algorithm.""" + def __init__( + self, + sparsity: int, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the rangePreview algorithm. - def validate_auto_encryption_opts_or_none(option, value): - """Validate the driver keyword arg.""" - if value is None: - return value - if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( - option,)) + .. note:: This feature is experimental only, and not intended for public use. + + :Parameters: + - `sparsity`: An integer. + - `min`: A BSON scalar value corresponding to the type being queried. + - `max`: A BSON scalar value corresponding to the type being queried. + - `precision`: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.precision = precision - return value + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity)), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc diff --git a/pymongo/errors.py b/pymongo/errors.py index 2d9fd05029..3e11c1f697 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -13,49 +13,57 @@ # limitations under the License.
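The reworked AutoEncryptionOpts constructor and the new RangeOpts class above can be exercised together. A minimal sketch, assuming pymongocrypt is installed; the key material and namespace are illustrative, and the client is built with the ``auto_encryption_opts`` keyword of MongoClient, which consumes these options::

    import os

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts

    kms_providers = {"local": {"key": os.urandom(96)}}
    opts = AutoEncryptionOpts(
        kms_providers,
        "keyvault.datakeys",
        # Optional knobs added in this change set:
        crypt_shared_lib_required=False,
        bypass_query_analysis=False,
    )
    client = MongoClient(auto_encryption_opts=opts)

    # RangeOpts.document assembles only the fields that were set,
    # coercing sparsity to a BSON Int64.
    range_opts = RangeOpts(sparsity=1, min=0, max=200)
    print(range_opts.document)  # {'sparsity': Int64(1), 'min': 0, 'max': 200}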
"""Exceptions raised by PyMongo.""" +from __future__ import annotations -import sys +from typing import TYPE_CHECKING, Any, Iterable, Mapping, Optional, Sequence, Union -from bson.errors import * +from bson.errors import InvalidDocument + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut try: - from ssl import CertificateError + # CPython 3.7+ + from ssl import SSLCertVerificationError as _CertificateError except ImportError: - from pymongo.ssl_match_hostname import CertificateError + try: + from ssl import CertificateError as _CertificateError + except ImportError: + + class _CertificateError(ValueError): # type: ignore + pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" - def __init__(self, message='', error_labels=None): - super(PyMongoError, self).__init__(message) + + def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: + super().__init__(message) self._message = message self._error_labels = set(error_labels or []) - def has_error_label(self, label): + def has_error_label(self, label: str) -> bool: """Return True if this error contains the given label. .. versionadded:: 3.7 """ return label in self._error_labels - def _add_error_label(self, label): + def _add_error_label(self, label: str) -> None: """Add the given label to this error.""" self._error_labels.add(label) - def _remove_error_label(self, label): + def _remove_error_label(self, label: str) -> None: """Remove the given label from this error.""" - self._error_labels.remove(label) + self._error_labels.discard(label) - if sys.version_info[0] == 2: - def __str__(self): - if isinstance(self._message, unicode): - return self._message.encode('utf-8', errors='replace') - return str(self._message) + @property + def timeout(self) -> bool: + """True if this error was caused by a timeout. - def __unicode__(self): - if isinstance(self._message, unicode): - return self._message - return unicode(self._message, 'utf-8', errors='replace') + .. versionadded:: 4.2 + """ + return False class ProtocolError(PyMongoError): @@ -64,12 +72,19 @@ class ProtocolError(PyMongoError): class ConnectionFailure(PyMongoError): """Raised when a connection to the database cannot be made or is lost.""" - def __init__(self, message='', error_labels=None): - if error_labels is None: - # Connection errors are transient errors by default. - error_labels = ("TransientTransactionError",) - super(ConnectionFailure, self).__init__( - message, error_labels=error_labels) + + +class WaitQueueTimeoutError(ConnectionFailure): + """Raised when an operation times out waiting to checkout a connection from the pool. + + Subclass of :exc:`~pymongo.errors.ConnectionFailure`. + + .. versionadded:: 4.2 + """ + + @property + def timeout(self) -> bool: + return True class AutoReconnect(ConnectionFailure): @@ -84,8 +99,18 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. 
""" - def __init__(self, message='', errors=None): - super(AutoReconnect, self).__init__(message) + + errors: Union[Mapping[str, Any], Sequence[Any]] + details: Union[Mapping[str, Any], Sequence[Any]] + + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence[Any]]] = None + ) -> None: + error_labels = None + if errors is not None: + if isinstance(errors, dict): + error_labels = errors.get("errorLabels") + super().__init__(message, error_labels) self.errors = self.details = errors or [] @@ -98,9 +123,21 @@ class NetworkTimeout(AutoReconnect): Subclass of :exc:`~pymongo.errors.AutoReconnect`. """ + @property + def timeout(self) -> bool: + return True + + +def _format_detailed_error( + message: str, details: Optional[Union[Mapping[str, Any], list[Any]]] +) -> str: + if details is not None: + message = f"{message}, full error: {details}" + return message -class NotMasterError(AutoReconnect): - """The server responded "not master" or "node is recovering". + +class NotPrimaryError(AutoReconnect): + """The server responded "not primary" or "node is recovering". These errors result from a query, write, or command. The operation failed because the client thought it was using the primary but the primary has @@ -111,8 +148,15 @@ class NotMasterError(AutoReconnect): its view of the server as soon as possible after throwing this exception. Subclass of :exc:`~pymongo.errors.AutoReconnect`. + + .. versionadded:: 3.12 """ + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], list[Any]]] = None + ) -> None: + super().__init__(_format_detailed_error(message, errors), errors=errors) + class ServerSelectionTimeoutError(AutoReconnect): """Thrown when no MongoDB server is available for an operation @@ -126,10 +170,13 @@ class ServerSelectionTimeoutError(AutoReconnect): Preference that the replica set cannot satisfy. """ + @property + def timeout(self) -> bool: + return True + class ConfigurationError(PyMongoError): - """Raised when something is incorrectly configured. - """ + """Raised when something is incorrectly configured.""" class OperationFailure(PyMongoError): @@ -139,23 +186,32 @@ class OperationFailure(PyMongoError): The :attr:`details` attribute. """ - def __init__(self, error, code=None, details=None): + def __init__( + self, + error: str, + code: Optional[int] = None, + details: Optional[Mapping[str, Any]] = None, + max_wire_version: Optional[int] = None, + ) -> None: error_labels = None if details is not None: - error_labels = details.get('errorLabels') - super(OperationFailure, self).__init__( - error, error_labels=error_labels) + error_labels = details.get("errorLabels") + super().__init__(_format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details + self.__max_wire_version = max_wire_version @property - def code(self): - """The error code returned by the server, if any. - """ + def _max_wire_version(self) -> Optional[int]: + return self.__max_wire_version + + @property + def code(self) -> Optional[int]: + """The error code returned by the server, if any.""" return self.__code @property - def details(self): + def details(self) -> Optional[Mapping[str, Any]]: """The complete error document returned by the server. 
Depending on the error that occurred, the error document @@ -166,6 +222,10 @@ def details(self): """ return self.__details + @property + def timeout(self) -> bool: + return self.__code in (50,) + class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is @@ -184,6 +244,10 @@ class ExecutionTimeout(OperationFailure): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True + class WriteConcernError(OperationFailure): """Base exception type for errors raised due to write concern. @@ -209,19 +273,46 @@ class WTimeoutError(WriteConcernError): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True + class DuplicateKeyError(WriteError): """Raised when an insert or update fails due to a duplicate key error.""" +def _wtimeout_error(error: Any) -> bool: + """Return True if this writeConcernError doc is caused by a timeout.""" + return error.get("code") == 50 or ("errInfo" in error and error["errInfo"].get("wtimeout")) + + class BulkWriteError(OperationFailure): """Exception class for bulk write errors. .. versionadded:: 2.7 """ - def __init__(self, results): - super(BulkWriteError, self).__init__( - "batch op errors occurred", 65, results) + + details: _DocumentOut + + def __init__(self, results: _DocumentOut) -> None: + super().__init__("batch op errors occurred", 65, results) + + def __reduce__(self) -> tuple[Any, Any]: + return self.__class__, (self.details,) + + @property + def timeout(self) -> bool: + # Check the last writeConcernError and last writeError to determine if this + # BulkWriteError was caused by a timeout. + wces = self.details.get("writeConcernErrors", []) + if wces and _wtimeout_error(wces[-1]): + return True + + werrs = self.details.get("writeErrors", []) + if werrs and werrs[-1].get("code") == 50: + return True + return False class InvalidOperation(PyMongoError): @@ -240,19 +331,8 @@ class InvalidURI(ConfigurationError): """Raised when trying to parse an invalid mongodb URI.""" -class ExceededMaxWaiters(PyMongoError): - """Raised when a thread tries to get a connection from a pool and - ``maxPoolSize * waitQueueMultiple`` threads are already waiting. - - .. versionadded:: 2.6 - """ - pass - - class DocumentTooLarge(InvalidDocument): - """Raised when an encoded document is too large for the connected server. - """ - pass + """Raised when an encoded document is too large for the connected server.""" class EncryptionError(PyMongoError): @@ -264,11 +344,43 @@ class EncryptionError(PyMongoError): .. versionadded:: 3.9 """ - def __init__(self, cause): - super(EncryptionError, self).__init__(str(cause)) + def __init__(self, cause: Exception) -> None: + super().__init__(str(cause)) self.__cause = cause @property - def cause(self): + def cause(self) -> Exception: """The exception that caused this encryption or decryption error.""" return self.__cause + + @property + def timeout(self) -> bool: + if isinstance(self.__cause, PyMongoError): + return self.__cause.timeout + return False + + +class EncryptedCollectionError(EncryptionError): + """Raised when creating a collection with encrypted_fields fails. + + .. versionadded:: 4.4 + """ + + def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: + super().__init__(cause) + self.__encrypted_fields = encrypted_fields + + @property + def encrypted_fields(self) -> Mapping[str, Any]: + """The encrypted_fields document that allows inferring which data keys are *known* to be created.
+ + Note that the returned document is not guaranteed to contain information about *all* of the data keys that + were created, for example in the case of an indefinite error like a timeout. Use the `cause` property to + determine whether a definite or indefinite error caused this error, and only rely on the accuracy of the + encrypted_fields if the error is definite. + """ + return self.__encrypted_fields + + +class _OperationCancelled(AutoReconnect): + """Internal error raised when a socket operation is cancelled.""" diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py new file mode 100644 index 0000000000..287db3fc4d --- /dev/null +++ b/pymongo/event_loggers.py @@ -0,0 +1,223 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Example event logger classes. + +.. versionadded:: 3.11 + +These loggers can be registered using :func:`register` or +:class:`~pymongo.mongo_client.MongoClient`. + +``monitoring.register(CommandLogger())`` + +or + +``MongoClient(event_listeners=[CommandLogger()])`` +""" +from __future__ import annotations + +import logging + +from pymongo import monitoring + + +class CommandLogger(monitoring.CommandListener): + """A simple listener that logs command events. + + Listens for :class:`~pymongo.monitoring.CommandStartedEvent`, + :class:`~pymongo.monitoring.CommandSucceededEvent` and + :class:`~pymongo.monitoring.CommandFailedEvent` events and + logs them at the `INFO` severity level using :mod:`logging`. + .. versionadded:: 3.11 + """ + + def started(self, event: monitoring.CommandStartedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} started on server " + f"{event.connection_id}" + ) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"succeeded in {event.duration_micros} " + "microseconds" + ) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"failed in {event.duration_micros} " + "microseconds" + ) + + +class ServerLogger(monitoring.ServerListener): + """A simple listener that logs server discovery events. + + Listens for :class:`~pymongo.monitoring.ServerOpeningEvent`, + :class:`~pymongo.monitoring.ServerDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.ServerClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. 
versionadded:: 3.11 + """ + + def opened(self, event: monitoring.ServerOpeningEvent) -> None: + logging.info(f"Server {event.server_address} added to topology {event.topology_id}") + + def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + f"Server {event.server_address} changed type from " + f"{event.previous_description.server_type_name} to " + f"{event.new_description.server_type_name}" + ) + + def closed(self, event: monitoring.ServerClosedEvent) -> None: + logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """A simple listener that logs server heartbeat events. + + Listens for :class:`~pymongo.monitoring.ServerHeartbeatStartedEvent`, + :class:`~pymongo.monitoring.ServerHeartbeatSucceededEvent`, + and :class:`~pymongo.monitoring.ServerHeartbeatFailedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + logging.info(f"Heartbeat sent to server {event.connection_id}") + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + # The reply.document attribute was added in PyMongo 3.4. + logging.info( + f"Heartbeat to server {event.connection_id} " + "succeeded with reply " + f"{event.reply.document}" + ) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + logging.warning( + f"Heartbeat to server {event.connection_id} failed with error {event.reply}" + ) + + +class TopologyLogger(monitoring.TopologyListener): + """A simple listener that logs server topology events. + + Listens for :class:`~pymongo.monitoring.TopologyOpenedEvent`, + :class:`~pymongo.monitoring.TopologyDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.TopologyClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} opened") + + def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: + logging.info(f"Topology description updated for topology id {event.topology_id}") + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + f"Topology {event.topology_id} changed type from " + f"{event.previous_description.topology_type_name} to " + f"{event.new_description.topology_type_name}" + ) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event: monitoring.TopologyClosedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} closed") + + +class ConnectionPoolLogger(monitoring.ConnectionPoolListener): + """A simple listener that logs server connection pool events. 
+ + Listens for :class:`~pymongo.monitoring.PoolCreatedEvent`, + :class:`~pymongo.monitoring.PoolClearedEvent`, + :class:`~pymongo.monitoring.PoolClosedEvent`, + :class:`~pymongo.monitoring.ConnectionCreatedEvent`, + :class:`~pymongo.monitoring.ConnectionReadyEvent`, + :class:`~pymongo.monitoring.ConnectionClosedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutStartedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutFailedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckedOutEvent`, + and :class:`~pymongo.monitoring.ConnectionCheckedInEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: + logging.info(f"[pool {event.address}] pool created") + + def pool_ready(self, event: monitoring.PoolReadyEvent) -> None: + logging.info(f"[pool {event.address}] pool ready") + + def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: + logging.info(f"[pool {event.address}] pool cleared") + + def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: + logging.info(f"[pool {event.address}] pool closed") + + def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: + logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") + + def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" + ) + + def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] " + f'connection closed, reason: "{event.reason}"' + ) + + def connection_check_out_started( + self, event: monitoring.ConnectionCheckOutStartedEvent + ) -> None: + logging.info(f"[pool {event.address}] connection check out started") + + def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: + logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") + + def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" + ) + + def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" + ) diff --git a/pymongo/hello.py b/pymongo/hello.py new file mode 100644 index 0000000000..d38c285ab7 --- /dev/null +++ b/pymongo/hello.py @@ -0,0 +1,220 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
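The event logger classes above are wired up exactly as the module docstring describes, either globally via ``monitoring.register`` or per client via ``event_listeners``. A minimal sketch; the ``ping`` command and the default localhost deployment are illustrative only::

    import logging

    from pymongo import MongoClient, monitoring
    from pymongo.event_loggers import CommandLogger, ConnectionPoolLogger

    logging.basicConfig(level=logging.INFO)

    # Global registration: all subsequently created clients emit events.
    monitoring.register(CommandLogger())

    # Per-client registration:
    client = MongoClient(event_listeners=[ConnectionPoolLogger()])
    client.admin.command("ping")  # logs the command plus pool/connection activity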
+ +"""Helpers for the 'hello' and legacy hello commands.""" +from __future__ import annotations + +import copy +import datetime +import itertools +from typing import Any, Generic, Mapping, Optional + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.server_type import SERVER_TYPE +from pymongo.typings import ClusterTime, _DocumentType + + +class HelloCompat: + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" + + +def _get_server_type(doc: Mapping[str, Any]) -> int: + """Determine the server type from a hello response.""" + if not doc.get("ok"): + return SERVER_TYPE.Unknown + + if doc.get("serviceId"): + return SERVER_TYPE.LoadBalancer + elif doc.get("isreplicaset"): + return SERVER_TYPE.RSGhost + elif doc.get("setName"): + if doc.get("hidden"): + return SERVER_TYPE.RSOther + elif doc.get(HelloCompat.PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get(HelloCompat.LEGACY_PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get("secondary"): + return SERVER_TYPE.RSSecondary + elif doc.get("arbiterOnly"): + return SERVER_TYPE.RSArbiter + else: + return SERVER_TYPE.RSOther + elif doc.get("msg") == "isdbgrid": + return SERVER_TYPE.Mongos + else: + return SERVER_TYPE.Standalone + + +class Hello(Generic[_DocumentType]): + """Parse a hello response from the server. + + .. versionadded:: 3.12 + """ + + __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") + + def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: + self._server_type = _get_server_type(doc) + self._doc: _DocumentType = doc + self._is_writable = self._server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.Standalone, + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + + self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable + self._awaitable = awaitable + + @property + def document(self) -> _DocumentType: + """The complete hello command response document. + + .. 
versionadded:: 3.4 + """ + return copy.copy(self._doc) + + @property + def server_type(self) -> int: + return self._server_type + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return set( + map( + common.clean_node, + itertools.chain( + self._doc.get("hosts", []), + self._doc.get("passives", []), + self._doc.get("arbiters", []), + ), + ) + ) + + @property + def tags(self) -> Mapping[str, Any]: + """Replica set member tags or empty dict.""" + return self._doc.get("tags", {}) + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + if self._doc.get("primary"): + return common.partition_node(self._doc["primary"]) + else: + return None + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._doc.get("setName") + + @property + def max_bson_size(self) -> int: + return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) + + @property + def max_message_size(self) -> int: + return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) + + @property + def max_write_batch_size(self) -> int: + return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) + + @property + def min_wire_version(self) -> int: + return self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) + + @property + def max_wire_version(self) -> int: + return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) + + @property + def set_version(self) -> Optional[int]: + return self._doc.get("setVersion") + + @property + def election_id(self) -> Optional[ObjectId]: + return self._doc.get("electionId") + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._doc.get("$clusterTime") + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._doc.get("logicalSessionTimeoutMinutes") + + @property + def is_writable(self) -> bool: + return self._is_writable + + @property + def is_readable(self) -> bool: + return self._is_readable + + @property + def me(self) -> Optional[tuple[str, int]]: + me = self._doc.get("me") + if me: + return common.clean_node(me) + return None + + @property + def last_write_date(self) -> Optional[datetime.datetime]: + return self._doc.get("lastWrite", {}).get("lastWriteDate") + + @property + def compressors(self) -> Optional[list[str]]: + return self._doc.get("compression") + + @property + def sasl_supported_mechs(self) -> list[str]: + """Supported authentication mechanisms for the current user. + + For example:: + + >>> hello.sasl_supported_mechs + ["SCRAM-SHA-1", "SCRAM-SHA-256"] + + """ + return self._doc.get("saslSupportedMechs", []) + + @property + def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: + """The speculativeAuthenticate field.""" + return self._doc.get("speculativeAuthenticate") + + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._doc.get("topologyVersion") + + @property + def awaitable(self) -> bool: + return self._awaitable + + @property + def service_id(self) -> Optional[ObjectId]: + return self._doc.get("serviceId") + + @property + def hello_ok(self) -> bool: + return self._doc.get("helloOk", False) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 8bfc62c6be..cd7d434b08 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -13,188 +13,224 @@ # limitations under the License. 
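The new Hello class above is an internal parser, but its behavior is easy to see with a hand-written response document (illustrative only, not a real server reply)::

    from pymongo.hello import Hello, HelloCompat
    from pymongo.server_type import SERVER_TYPE

    doc = {
        "ok": 1,
        HelloCompat.PRIMARY: True,  # "isWritablePrimary"
        "setName": "rs0",
        "hosts": ["host1:27017", "host2:27017"],
        "maxWireVersion": 17,
    }
    hello = Hello(doc)
    assert hello.server_type == SERVER_TYPE.RSPrimary
    assert hello.is_writable and hello.is_readable
    assert hello.replica_set_name == "rs0"
    assert ("host1", 27017) in hello.all_hosts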
"""Bits and pieces used by the driver that don't really fit elsewhere.""" +from __future__ import annotations import sys import traceback +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Container, + Iterable, + Mapping, + NoReturn, + Optional, + Sequence, + TypeVar, + Union, + cast, +) -from bson.py3compat import abc, iteritems, itervalues, string_type from bson.son import SON from pymongo import ASCENDING -from pymongo.errors import (CursorNotFound, - DuplicateKeyError, - ExecutionTimeout, - NotMasterError, - OperationFailure, - WriteError, - WriteConcernError, - WTimeoutError) +from pymongo.errors import ( + CursorNotFound, + DuplicateKeyError, + ExecutionTimeout, + NotPrimaryError, + OperationFailure, + WriteConcernError, + WriteError, + WTimeoutError, + _wtimeout_error, +) +from pymongo.hello import HelloCompat + +if TYPE_CHECKING: + from pymongo.cursor import _Hint + from pymongo.operations import _IndexList + from pymongo.typings import _DocumentOut # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES = frozenset([ - 11600, # InterruptedAtShutdown - 91, # ShutdownInProgress -]) -# From the SDAM spec, the "not master" error codes are combined with the +_SHUTDOWN_CODES: frozenset = frozenset( + [ + 11600, # InterruptedAtShutdown + 91, # ShutdownInProgress + ] +) +# From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). -_NOT_MASTER_CODES = frozenset([ - 10107, # NotMaster - 13435, # NotMasterNoSlaveOk - 11602, # InterruptedDueToReplStateChange - 13436, # NotMasterOrSecondary - 189, # PrimarySteppedDown -]) | _SHUTDOWN_CODES +_NOT_PRIMARY_CODES: frozenset = ( + frozenset( + [ + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk + 11602, # InterruptedDueToReplStateChange + 13436, # NotPrimaryOrSecondary + 189, # PrimarySteppedDown + ] + ) + | _SHUTDOWN_CODES +) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_MASTER_CODES | frozenset([ - 7, # HostNotFound - 6, # HostUnreachable - 89, # NetworkTimeout - 9001, # SocketException -]) -_UUNDER = u"_" +_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( + [ + 7, # HostNotFound + 6, # HostUnreachable + 89, # NetworkTimeout + 9001, # SocketException + 262, # ExceededTimeLimit + ] +) +# Server code raised when re-authentication is required +_REAUTHENTICATION_REQUIRED_CODE: int = 391 -def _gen_index_name(keys): + +def _gen_index_name(keys: _IndexList) -> str: """Generate an index name from the set of fields it is over.""" - return _UUNDER.join(["%s_%s" % item for item in keys]) + return "_".join(["{}_{}".format(*item) for item in keys]) -def _index_list(key_or_list, direction=None): +def _index_list( + key_or_list: _Hint, direction: Optional[Union[int, str]] = None +) -> Sequence[tuple[str, Union[int, str, Mapping[str, Any]]]]: """Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction. 
""" if direction is not None: + if not isinstance(key_or_list, str): + raise TypeError("Expected a string and a direction") return [(key_or_list, direction)] else: - if isinstance(key_or_list, string_type): + if isinstance(key_or_list, str): return [(key_or_list, ASCENDING)] + elif isinstance(key_or_list, abc.ItemsView): + return list(key_or_list) # type: ignore[arg-type] + elif isinstance(key_or_list, abc.Mapping): + return list(key_or_list.items()) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, " - "key_or_list must be an instance of list") - return key_or_list + raise TypeError("if no direction is specified, key_or_list must be an instance of list") + values: list[tuple[str, int]] = [] + for item in key_or_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + values.append(item) + return values -def _index_document(index_list): +def _index_document(index_list: _IndexList) -> SON[str, Any]: """Helper to generate an index specifying document. Takes a list of (key, direction) pairs. """ - if isinstance(index_list, abc.Mapping): - raise TypeError("passing a dict to sort/create_index/hint is not " - "allowed - use a list of tuples instead. did you " - "mean %r?" % list(iteritems(index_list))) - elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " - "not: " + repr(index_list)) + if not isinstance(index_list, (list, tuple, abc.Mapping)): + raise TypeError( + "must use a dictionary or a list of (key, direction) pairs, not: " + repr(index_list) + ) if not len(index_list): - raise ValueError("key_or_list must not be the empty list") - - index = SON() - for (key, value) in index_list: - if not isinstance(key, string_type): - raise TypeError("first item in each key pair must be a string") - if not isinstance(value, (string_type, int, abc.Mapping)): - raise TypeError("second item in each key pair must be 1, -1, " - "'2d', 'geoHaystack', or another valid MongoDB " - "index specifier.") - index[key] = value + raise ValueError("key_or_list must not be empty") + + index: SON[str, Any] = SON() + + if isinstance(index_list, abc.Mapping): + for key in index_list: + value = index_list[key] + _validate_index_key_pair(key, value) + index[key] = value + else: + for item in index_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + key, value = item + _validate_index_key_pair(key, value) + index[key] = value return index -def _check_command_response(response, msg=None, allowable_errors=None, - parse_write_concern_error=False): - """Check the response to a command for errors. - """ +def _validate_index_key_pair(key: Any, value: Any) -> None: + if not isinstance(key, str): + raise TypeError("first item in each key pair must be an instance of str") + if not isinstance(value, (str, int, abc.Mapping)): + raise TypeError( + "second item in each key pair must be 1, -1, " + "'2d', or another valid MongoDB index specifier." + ) + + +def _check_command_response( + response: _DocumentOut, + max_wire_version: Optional[int], + allowable_errors: Optional[Container[Union[int, str]]] = None, + parse_write_concern_error: bool = False, +) -> None: + """Check the response to a command for errors.""" if "ok" not in response: # Server didn't recognize our message as a command. 
- raise OperationFailure(response.get("$err"), - response.get("code"), - response) - - if parse_write_concern_error and 'writeConcernError' in response: - _raise_write_concern_error(response['writeConcernError']) - - if not response["ok"]: - - details = response - # Mongos returns the error details in a 'raw' object - # for some errors. - if "raw" in response: - for shard in itervalues(response["raw"]): - # Grab the first non-empty raw error from a shard. - if shard.get("errmsg") and not shard.get("ok"): - details = shard - break - - errmsg = details["errmsg"] - if allowable_errors is None or errmsg not in allowable_errors: - - code = details.get("code") - # Server is "not master" or "recovering" - if code in _NOT_MASTER_CODES: - raise NotMasterError(errmsg, response) - elif ("not master" in errmsg - or "node is recovering" in errmsg): - raise NotMasterError(errmsg, response) - - # Server assertion failures - if errmsg == "db assertion failure": - errmsg = ("db assertion failure, assertion: '%s'" % - details.get("assertion", "")) - raise OperationFailure(errmsg, - details.get("assertionCode"), - response) - - # Other errors - # findAndModify with upsert can raise duplicate key error - if code in (11000, 11001, 12582): - raise DuplicateKeyError(errmsg, code, response) - elif code == 50: - raise ExecutionTimeout(errmsg, code, response) - elif code == 43: - raise CursorNotFound(errmsg, code, response) - - msg = msg or "%s" - raise OperationFailure(msg % errmsg, code, response) - - -def _check_gle_response(result): - """Return getlasterror response as a dict, or raise OperationFailure.""" - # Did getlasterror itself fail? - _check_command_response(result) - - if result.get("wtimeout", False): - # MongoDB versions before 1.8.0 return the error message in an "errmsg" - # field. If "errmsg" exists "err" will also exist set to None, so we - # have to check for "errmsg" first. - raise WTimeoutError(result.get("errmsg", result.get("err")), - result.get("code"), - result) - - error_msg = result.get("err", "") - if error_msg is None: - return result - - if error_msg.startswith("not master"): - raise NotMasterError(error_msg, result) - - details = result - - # mongos returns the error code in an error object for some errors. - if "errObjects" in result: - for errobj in result["errObjects"]: - if errobj.get("err") == error_msg: - details = errobj + raise OperationFailure( + response.get("$err"), # type: ignore[arg-type] + response.get("code"), + response, + max_wire_version, + ) + + if parse_write_concern_error and "writeConcernError" in response: + _error = response["writeConcernError"] + _labels = response.get("errorLabels") + if _labels: + _error.update({"errorLabels": _labels}) + _raise_write_concern_error(_error) + + if response["ok"]: + return + + details = response + # Mongos returns the error details in a 'raw' object + # for some errors. + if "raw" in response: + for shard in response["raw"].values(): + # Grab the first non-empty raw error from a shard. + if shard.get("errmsg") and not shard.get("ok"): + details = shard break + errmsg = details["errmsg"] code = details.get("code") + + # For allowable errors, only check for error messages when the code is not + # included. 
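    # For example, drop_collection passes allowable_errors=["ns not found", 26]
    # so that dropping a namespace that does not exist is treated as success on
    # both older (message-based) and newer (code-based) servers.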
+ if allowable_errors: + if code is not None: + if code in allowable_errors: + return + elif errmsg in allowable_errors: + return + + # Server is "not primary" or "recovering" + if code is not None: + if code in _NOT_PRIMARY_CODES: + raise NotPrimaryError(errmsg, response) + elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg: + raise NotPrimaryError(errmsg, response) + + # Other errors + # findAndModify with upsert can raise duplicate key error if code in (11000, 11001, 12582): - raise DuplicateKeyError(details["err"], code, result) - raise OperationFailure(details["err"], code, result) + raise DuplicateKeyError(errmsg, code, response, max_wire_version) + elif code == 50: + raise ExecutionTimeout(errmsg, code, response, max_wire_version) + elif code == 43: + raise CursorNotFound(errmsg, code, response, max_wire_version) + raise OperationFailure(errmsg, code, response, max_wire_version) -def _raise_last_write_error(write_errors): + +def _raise_last_write_error(write_errors: list[Any]) -> NoReturn: # If the last batch had multiple errors only report # the last error to emulate continue_on_error. error = write_errors[-1] @@ -203,40 +239,40 @@ def _raise_last_write_error(write_errors): raise WriteError(error.get("errmsg"), error.get("code"), error) -def _raise_write_concern_error(error): - if "errInfo" in error and error["errInfo"].get('wtimeout'): +def _raise_write_concern_error(error: Any) -> NoReturn: + if _wtimeout_error(error): # Make sure we raise WTimeoutError - raise WTimeoutError( - error.get("errmsg"), error.get("code"), error) - raise WriteConcernError( - error.get("errmsg"), error.get("code"), error) + raise WTimeoutError(error.get("errmsg"), error.get("code"), error) + raise WriteConcernError(error.get("errmsg"), error.get("code"), error) -def _check_write_command_response(result): - """Backward compatibility helper for write command error handling. - """ - # Prefer write errors over write concern errors - write_errors = result.get("writeErrors") - if write_errors: - _raise_last_write_error(write_errors) +def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: + """Return the writeConcernError or None.""" + wce = result.get("writeConcernError") + if wce: + # The server reports errorLabels at the top level but it's more + # convenient to attach it to the writeConcernError doc itself. + error_labels = result.get("errorLabels") + if error_labels: + wce["errorLabels"] = error_labels + return wce - error = result.get("writeConcernError") - if error: - _raise_write_concern_error(error) - -def _raise_last_error(bulk_write_result): - """Backward compatibility helper for insert error handling. - """ +def _check_write_command_response(result: Mapping[str, Any]) -> None: + """Backward compatibility helper for write command error handling.""" # Prefer write errors over write concern errors - write_errors = bulk_write_result.get("writeErrors") + write_errors = result.get("writeErrors") if write_errors: _raise_last_write_error(write_errors) - _raise_write_concern_error(bulk_write_result["writeConcernErrors"][-1]) + wce = _get_wce_doc(result) + if wce: + _raise_write_concern_error(wce) -def _fields_list_to_dict(fields, option_name): +def _fields_list_to_dict( + fields: Union[Mapping[str, Any], Iterable[str]], option_name: str +) -> Mapping[str, Any]: """Takes a sequence of field names and returns a matching dictionary. 
["a", "b"] becomes {"a": 1, "b": 1} @@ -249,17 +285,14 @@ def _fields_list_to_dict(fields, option_name): return fields if isinstance(fields, (abc.Sequence, abc.Set)): - if not all(isinstance(field, string_type) for field in fields): - raise TypeError("%s must be a list of key names, each an " - "instance of %s" % (option_name, - string_type.__name__)) + if not all(isinstance(field, str) for field in fields): + raise TypeError(f"{option_name} must be a list of key names, each an instance of str") return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or " - "list of key names" % (option_name,)) + raise TypeError(f"{option_name} must be a mapping or list of key names") -def _handle_exception(): +def _handle_exception() -> None: """Print exceptions raised by subscribers to stderr.""" # Heavily influenced by logging.Handler.handleError. @@ -268,9 +301,45 @@ def _handle_exception(): if sys.stderr: einfo = sys.exc_info() try: - traceback.print_exception(einfo[0], einfo[1], einfo[2], - None, sys.stderr) - except IOError: + traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) + except OSError: pass finally: del einfo + + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + def inner(*args: Any, **kwargs: Any) -> Any: + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.message import _BulkWriteContext + from pymongo.pool import Connection + + try: + return func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a Connection + # or has a connection attribute, so we can trigger + # a reauth. + conn = None + for arg in args: + if isinstance(arg, Connection): + conn = arg + break + if isinstance(arg, _BulkWriteContext): + conn = arg.conn + break + if conn: + conn.authenticate(reauthenticate=True) + else: + raise + return func(*args, **kwargs) + raise + + return cast(F, inner) diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py deleted file mode 100644 index e723ff0a93..0000000000 --- a/pymongo/ismaster.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Parse a response to the 'ismaster' command.""" - -import itertools - -from bson.py3compat import imap -from pymongo import common -from pymongo.server_type import SERVER_TYPE - - -def _get_server_type(doc): - """Determine the server type from an ismaster response.""" - if not doc.get('ok'): - return SERVER_TYPE.Unknown - - if doc.get('isreplicaset'): - return SERVER_TYPE.RSGhost - elif doc.get('setName'): - if doc.get('hidden'): - return SERVER_TYPE.RSOther - elif doc.get('ismaster'): - return SERVER_TYPE.RSPrimary - elif doc.get('secondary'): - return SERVER_TYPE.RSSecondary - elif doc.get('arbiterOnly'): - return SERVER_TYPE.RSArbiter - else: - return SERVER_TYPE.RSOther - elif doc.get('msg') == 'isdbgrid': - return SERVER_TYPE.Mongos - else: - return SERVER_TYPE.Standalone - - -class IsMaster(object): - __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable') - - def __init__(self, doc): - """Parse an ismaster response from the server.""" - self._server_type = _get_server_type(doc) - self._doc = doc - self._is_writable = self._server_type in ( - SERVER_TYPE.RSPrimary, - SERVER_TYPE.Standalone, - SERVER_TYPE.Mongos) - - self._is_readable = ( - self.server_type == SERVER_TYPE.RSSecondary - or self._is_writable) - - @property - def document(self): - """The complete ismaster command response document. - - .. versionadded:: 3.4 - """ - return self._doc.copy() - - @property - def server_type(self): - return self._server_type - - @property - def all_hosts(self): - """List of hosts, passives, and arbiters known to this server.""" - return set(imap(common.clean_node, itertools.chain( - self._doc.get('hosts', []), - self._doc.get('passives', []), - self._doc.get('arbiters', [])))) - - @property - def tags(self): - """Replica set member tags or empty dict.""" - return self._doc.get('tags', {}) - - @property - def primary(self): - """This server's opinion about who the primary is, or None.""" - if self._doc.get('primary'): - return common.partition_node(self._doc['primary']) - else: - return None - - @property - def replica_set_name(self): - """Replica set name or None.""" - return self._doc.get('setName') - - @property - def max_bson_size(self): - return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) - - @property - def max_message_size(self): - return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) - - @property - def max_write_batch_size(self): - return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) - - @property - def min_wire_version(self): - return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) - - @property - def max_wire_version(self): - return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) - - @property - def set_version(self): - return self._doc.get('setVersion') - - @property - def election_id(self): - return self._doc.get('electionId') - - @property - def cluster_time(self): - return self._doc.get('$clusterTime') - - @property - def logical_session_timeout_minutes(self): - return self._doc.get('logicalSessionTimeoutMinutes') - - @property - def is_writable(self): - return self._is_writable - - @property - def is_readable(self): - return self._is_readable - - @property - def me(self): - me = self._doc.get('me') - if me: - return common.clean_node(me) - - @property - def last_write_date(self): - return self._doc.get('lastWrite', {}).get('lastWriteDate') - - @property - def compressors(self): - return self._doc.get('compression') diff --git a/pymongo/lock.py b/pymongo/lock.py new file mode 100644 index 
0000000000..e374785006 --- /dev/null +++ b/pymongo/lock.py @@ -0,0 +1,40 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import threading +import weakref + +_HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") + +# References to instances of _create_lock +_forkable_locks: weakref.WeakSet[threading.Lock] = weakref.WeakSet() + + +def _create_lock() -> threading.Lock: + """Represents a lock that is tracked upon instantiation using a WeakSet and + reset by pymongo upon forking. + """ + lock = threading.Lock() + if _HAS_REGISTER_AT_FORK: + _forkable_locks.add(lock) + return lock + + +def _release_locks() -> None: + # Completed the fork, reset all the locks in the child. + for lock in _forkable_locks: + if lock.locked(): + lock.release() diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 6bc2fe7232..72edf555b3 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -26,46 +26,53 @@ where "SMax" is the secondary with the greatest lastWriteDate. """ +from __future__ import annotations + +from typing import TYPE_CHECKING from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE - +if TYPE_CHECKING: + from pymongo.server_selectors import Selection # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, - heartbeat_frequency): +def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> None: # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( "maxStalenessSeconds must be at least heartbeatFrequencyMS +" " %d seconds. maxStalenessSeconds is set to %d," - " heartbeatFrequencyMS is set to %d." % ( - IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000)) + " heartbeatFrequencyMS is set to %d." + % (IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000) + ) if max_staleness < SMALLEST_MAX_STALENESS: raise ConfigurationError( "maxStalenessSeconds must be at least %d. " - "maxStalenessSeconds is set to %d." % ( - SMALLEST_MAX_STALENESS, max_staleness)) + "maxStalenessSeconds is set to %d." % (SMALLEST_MAX_STALENESS, max_staleness) + ) -def _with_primary(max_staleness, selection): +def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary + assert primary sds = [] for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. 
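            # Worked example of the formula below: if, at its last check, the
            # secondary's most recent write was 40s old
            # (last_update_time - last_write_date), the primary's was 5s old,
            # and heartbeat_frequency is 10s, estimated staleness is
            # 40 - 5 + 10 = 45s, so the member is filtered out for any
            # maxStalenessSeconds below 45.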
+ assert s.last_write_date and primary.last_write_date # noqa: PT018 staleness = ( - (s.last_update_time - s.last_write_date) - - (primary.last_update_time - primary.last_write_date) + - selection.heartbeat_frequency) + (s.last_update_time - s.last_write_date) + - (primary.last_update_time - primary.last_write_date) + + selection.heartbeat_frequency + ) if staleness <= max_staleness: sds.append(s) @@ -75,7 +82,7 @@ def _with_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def _no_primary(max_staleness, selection): +def _no_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with no known primary.""" # Secondary that's replicated the most recent writes. smax = selection.secondary_with_max_last_write_date() @@ -88,9 +95,8 @@ def _no_primary(max_staleness, selection): for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. - staleness = (smax.last_write_date - - s.last_write_date + - selection.heartbeat_frequency) + assert smax.last_write_date and s.last_write_date # noqa: PT018 + staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: sds.append(s) @@ -100,7 +106,7 @@ def _no_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def select(max_staleness, selection): +def select(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection.""" if max_staleness == -1: return selection diff --git a/pymongo/message.py b/pymongo/message.py index 1f34efa952..c04f4a8874 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -13,45 +13,74 @@ # limitations under the License. """Tools for creating `messages -`_ to be sent to +`_ to be sent to MongoDB. .. note:: This module is for internal use and is generally not needed by application developers. 
""" +from __future__ import annotations import datetime import random import struct +from io import BytesIO as _BytesIO +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + Mapping, + MutableMapping, + NoReturn, + Optional, + Union, + cast, +) import bson -from bson import (CodecOptions, - decode, - encode, - _dict_to_bson, - _make_c_string) -from bson.codec_options import DEFAULT_CODEC_OPTIONS -from bson.raw_bson import _inflate_bson, DEFAULT_RAW_BSON_OPTIONS -from bson.py3compat import b, StringIO +from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode +from bson.int64 import Int64 +from bson.raw_bson import ( + _RAW_ARRAY_BSON_OPTIONS, + DEFAULT_RAW_BSON_OPTIONS, + RawBSONDocument, + _inflate_bson, +) from bson.son import SON try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] + _use_c = True except ImportError: _use_c = False -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DocumentTooLarge, - ExecutionTimeout, - InvalidOperation, - NotMasterError, - OperationFailure, - ProtocolError) -from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + ProtocolError, +) +from pymongo.hello import HelloCompat +from pymongo.helpers import _handle_reauth from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.client_session import ClientSession + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.mongo_client import MongoClient + from pymongo.monitoring import _EventListeners + from pymongo.pool import Connection + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _Address, _DocumentOut MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 @@ -63,65 +92,57 @@ _UPDATE = 1 _DELETE = 2 -_EMPTY = b'' -_BSONOBJ = b'\x03' -_ZERO_8 = b'\x00' -_ZERO_16 = b'\x00\x00' -_ZERO_32 = b'\x00\x00\x00\x00' -_ZERO_64 = b'\x00\x00\x00\x00\x00\x00\x00\x00' -_SKIPLIM = b'\x00\x00\x00\x00\xff\xff\xff\xff' +_EMPTY = b"" +_BSONOBJ = b"\x03" +_ZERO_8 = b"\x00" +_ZERO_16 = b"\x00\x00" +_ZERO_32 = b"\x00\x00\x00\x00" +_ZERO_64 = b"\x00\x00\x00\x00\x00\x00\x00\x00" +_SKIPLIM = b"\x00\x00\x00\x00\xff\xff\xff\xff" _OP_MAP = { - _INSERT: b'\x04documents\x00\x00\x00\x00\x00', - _UPDATE: b'\x04updates\x00\x00\x00\x00\x00', - _DELETE: b'\x04deletes\x00\x00\x00\x00\x00', -} -_FIELD_MAP = { - 'insert': 'documents', - 'update': 'updates', - 'delete': 'deletes' + _INSERT: b"\x04documents\x00\x00\x00\x00\x00", + _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", + _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } +_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UJOIN = u"%s.%s" +_UNICODE_REPLACE_CODEC_OPTIONS: CodecOptions[Mapping[str, Any]] = CodecOptions( + unicode_decode_error_handler="replace" +) -_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions( - unicode_decode_error_handler='replace') - -def _randint(): +def _randint() -> int: """Generate a pseudo random 32 bit integer.""" - return random.randint(MIN_INT32, MAX_INT32) + return random.randint(MIN_INT32, MAX_INT32) # noqa: S311 -def _maybe_add_read_preference(spec, read_preference): +def _maybe_add_read_preference( + spec: MutableMapping[str, Any], 
read_preference: _ServerMode +) -> MutableMapping[str, Any]: """Add $readPreference to spec when appropriate.""" mode = read_preference.mode - tag_sets = read_preference.tag_sets - max_staleness = read_preference.max_staleness + document = read_preference.document # Only add $readPreference if it's something other than primary to avoid # problems with mongos versions that don't support read preferences. Also, # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting - # the slaveOkay bit has the same effect). - if mode and ( - mode != ReadPreference.SECONDARY_PREFERRED.mode - or tag_sets != [{}] - or max_staleness != -1): - + # the secondaryOkay bit has the same effect). + if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): if "$query" not in spec: spec = SON([("$query", spec)]) - spec["$readPreference"] = read_preference.document + spec["$readPreference"] = document return spec -def _convert_exception(exception): +def _convert_exception(exception: Exception) -> dict[str, Any]: """Convert an Exception into a failure document for publishing.""" - return {'errmsg': str(exception), - 'errtype': exception.__class__.__name__} - + return {"errmsg": str(exception), "errtype": exception.__class__.__name__} -def _convert_write_result(operation, command, result): - """Convert a legacy write result to write commmand format.""" +def _convert_write_result( + operation: str, command: Mapping[str, Any], result: Mapping[str, Any] +) -> dict[str, Any]: + """Convert a legacy write result to write command format.""" # Based on _merge_legacy from bulk.py affected = result.get("n", 0) res = {"ok": 1, "n": affected} @@ -129,21 +150,17 @@ def _convert_write_result(operation, command, result): if errmsg: # The write was successful on at least the primary so don't return. if result.get("wtimeout"): - res["writeConcernError"] = {"errmsg": errmsg, - "code": 64, - "errInfo": {"wtimeout": True}} + res["writeConcernError"] = {"errmsg": errmsg, "code": 64, "errInfo": {"wtimeout": True}} else: # The write failed. - error = {"index": 0, - "code": result.get("code", 8), - "errmsg": errmsg} + error = {"index": 0, "code": result.get("code", 8), "errmsg": errmsg} if "errInfo" in result: error["errInfo"] = result["errInfo"] res["writeErrors"] = [error] return res if operation == "insert": # GLE result for insert is always 0 in most MongoDB versions. - res["n"] = len(command['documents']) + res["n"] = len(command["documents"]) elif operation == "update": if "upserted" in result: res["upserted"] = [{"index": 0, "_id": result["upserted"]}] @@ -152,96 +169,158 @@ def _convert_write_result(operation, command, result): elif result.get("updatedExisting") is False and affected == 1: # If _id is in both the update document *and* the query spec # the update document _id takes precedence. 
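        # Illustration: for an upserted update like
        # {"q": {"_id": 1}, "u": {"_id": 2, "n": 1}}, the reported upserted
        # _id is 2; "q" is consulted only when "u" carries no _id.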
- update = command['updates'][0] + update = command["updates"][0] _id = update["u"].get("_id", update["q"].get("_id")) res["upserted"] = [{"index": 0, "_id": _id}] return res -_OPTIONS = SON([ - ('tailable', 2), - ('oplogReplay', 8), - ('noCursorTimeout', 16), - ('awaitData', 32), - ('allowPartialResults', 128)]) - - -_MODIFIERS = SON([ - ('$query', 'filter'), - ('$orderby', 'sort'), - ('$hint', 'hint'), - ('$comment', 'comment'), - ('$maxScan', 'maxScan'), - ('$maxTimeMS', 'maxTimeMS'), - ('$max', 'max'), - ('$min', 'min'), - ('$returnKey', 'returnKey'), - ('$showRecordId', 'showRecordId'), - ('$showDiskLoc', 'showRecordId'), # <= MongoDb 3.0 - ('$snapshot', 'snapshot')]) - - -def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, - read_concern, collation=None, session=None): +_OPTIONS = SON( + [ + ("tailable", 2), + ("oplogReplay", 8), + ("noCursorTimeout", 16), + ("awaitData", 32), + ("allowPartialResults", 128), + ] +) + + +_MODIFIERS = SON( + [ + ("$query", "filter"), + ("$orderby", "sort"), + ("$hint", "hint"), + ("$comment", "comment"), + ("$maxScan", "maxScan"), + ("$maxTimeMS", "maxTimeMS"), + ("$max", "max"), + ("$min", "min"), + ("$returnKey", "returnKey"), + ("$showRecordId", "showRecordId"), + ("$showDiskLoc", "showRecordId"), # <= MongoDb 3.0 + ("$snapshot", "snapshot"), + ] +) + + +def _gen_find_command( + coll: str, + spec: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + skip: int, + limit: int, + batch_size: Optional[int], + options: Optional[int], + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + allow_disk_use: Optional[bool] = None, +) -> SON[str, Any]: """Generate a find command document.""" - cmd = SON([('find', coll)]) - if '$query' in spec: - cmd.update([(_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) - for key, val in spec.items()]) - if '$explain' in cmd: - cmd.pop('$explain') - if '$readPreference' in cmd: - cmd.pop('$readPreference') + cmd: SON[str, Any] = SON([("find", coll)]) + if "$query" in spec: + cmd.update( + [ + (_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) + for key, val in spec.items() + ] + ) + if "$explain" in cmd: + cmd.pop("$explain") + if "$readPreference" in cmd: + cmd.pop("$readPreference") else: - cmd['filter'] = spec + cmd["filter"] = spec if projection: - cmd['projection'] = projection + cmd["projection"] = projection if skip: - cmd['skip'] = skip + cmd["skip"] = skip if limit: - cmd['limit'] = abs(limit) + cmd["limit"] = abs(limit) if limit < 0: - cmd['singleBatch'] = True + cmd["singleBatch"] = True if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if read_concern.level and not (session and session.in_transaction): - cmd['readConcern'] = read_concern.document + cmd["readConcern"] = read_concern.document if collation: - cmd['collation'] = collation + cmd["collation"] = collation + if allow_disk_use is not None: + cmd["allowDiskUse"] = allow_disk_use if options: - cmd.update([(opt, True) - for opt, val in _OPTIONS.items() - if options & val]) + cmd.update([(opt, True) for opt, val in _OPTIONS.items() if options & val]) + return cmd -def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms): +def _gen_get_more_command( + cursor_id: Optional[int], + coll: str, + batch_size: Optional[int], + max_await_time_ms: Optional[int], + comment: Optional[Any], + conn: Connection, +) -> SON[str, Any]: """Generate a getMore command document.""" - cmd = 
SON([('getMore', cursor_id), - ('collection', coll)]) + cmd: SON[str, Any] = SON([("getMore", cursor_id), ("collection", coll)]) if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if max_await_time_ms is not None: - cmd['maxTimeMS'] = max_await_time_ms + cmd["maxTimeMS"] = max_await_time_ms + if comment is not None and conn.max_wire_version >= 9: + cmd["comment"] = comment return cmd -class _Query(object): +class _Query: """A query operation.""" - __slots__ = ('flags', 'db', 'coll', 'ntoskip', 'spec', - 'fields', 'codec_options', 'read_preference', 'limit', - 'batch_size', 'name', 'read_concern', 'collation', - 'session', 'client', '_as_command') + __slots__ = ( + "flags", + "db", + "coll", + "ntoskip", + "spec", + "fields", + "codec_options", + "read_preference", + "limit", + "batch_size", + "name", + "read_concern", + "collation", + "session", + "client", + "allow_disk_use", + "_as_command", + "exhaust", + ) # For compatibility with the _GetMore class. - exhaust_mgr = None + conn_mgr = None cursor_id = None - def __init__(self, flags, db, coll, ntoskip, spec, fields, - codec_options, read_preference, limit, - batch_size, read_concern, collation, session, client): + def __init__( + self, + flags: int, + db: str, + coll: str, + ntoskip: int, + spec: Mapping[str, Any], + fields: Optional[Mapping[str, Any]], + codec_options: CodecOptions, + read_preference: _ServerMode, + limit: int, + batch_size: int, + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]], + session: Optional[ClientSession], + client: MongoClient, + allow_disk_use: Optional[bool], + exhaust: bool, + ): self.flags = flags self.db = db self.coll = coll @@ -256,72 +335,85 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self.collation = collation self.session = session self.client = client - self.name = 'find' + self.allow_disk_use = allow_disk_use + self.name = "find" + self._as_command: Optional[tuple[SON[str, Any], str]] = None + self.exhaust = exhaust + + def reset(self) -> None: self._as_command = None - def namespace(self): - return _UJOIN % (self.db, self.coll) + def namespace(self) -> str: + return f"{self.db}.{self.coll}" - def use_command(self, sock_info, exhaust): + def use_command(self, conn: Connection) -> bool: use_find_cmd = False - if sock_info.max_wire_version >= 4: - if not exhaust: - use_find_cmd = True + if not self.exhaust: + use_find_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_find_cmd = True elif not self.read_concern.ok_for_legacy: raise ConfigurationError( - 'read concern level of %s is not valid ' - 'with a max wire version of %d.' - % (self.read_concern.level, - sock_info.max_wire_version)) - - if sock_info.max_wire_version < 5 and self.collation is not None: - raise ConfigurationError( - 'Specifying a collation is unsupported with a max wire ' - 'version of %d.' % (sock_info.max_wire_version,)) - - sock_info.validate_session(self.client, self.session) + "read concern level of %s is not valid " + "with a max wire version of %d." % (self.read_concern.level, conn.max_wire_version) + ) + conn.validate_session(self.client, self.session) return use_find_cmd - def as_command(self, sock_info): + def as_command( + self, conn: Connection, apply_timeout: bool = False + ) -> tuple[SON[str, Any], str]: """Return a find command document for this query.""" # We use the command twice: on the wire and for command monitoring. # Generate it once, for speed and to avoid repeating side-effects. 
if self._as_command is not None: return self._as_command - explain = '$explain' in self.spec - cmd = _gen_find_command( - self.coll, self.spec, self.fields, self.ntoskip, - self.limit, self.batch_size, self.flags, self.read_concern, - self.collation, self.session) + explain = "$explain" in self.spec + cmd: SON[str, Any] = _gen_find_command( + self.coll, + self.spec, + self.fields, + self.ntoskip, + self.limit, + self.batch_size, + self.flags, + self.read_concern, + self.collation, + self.session, + self.allow_disk_use, + ) if explain: - self.name = 'explain' - cmd = SON([('explain', cmd)]) + self.name = "explain" + cmd = SON([("explain", cmd)]) session = self.session + conn.add_server_api(cmd) if session: - session._apply_to(cmd, False, self.read_preference) + session._apply_to(cmd, False, self.read_preference, conn) # Explain does not support readConcern. - if (not explain and session.options.causal_consistency - and session.operation_time is not None - and not session.in_transaction): - cmd.setdefault( - 'readConcern', {})[ - 'afterClusterTime'] = session.operation_time - sock_info.send_cluster_time(cmd, session, self.client) + if not explain and not session.in_transaction: + session._update_read_concern(cmd, conn) + conn.send_cluster_time(cmd, session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = cast(SON[str, Any], client._encrypter.encrypt(self.db, cmd, self.codec_options)) + # Support CSOT + if apply_timeout: + conn.apply_timeout(client, cmd) self._as_command = cmd, self.db return self._as_command - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - """Get a query message, possibly setting the slaveOk bit.""" - if set_slave_ok: - # Set the slaveOk bit. + def get_message( + self, read_preference: _ServerMode, conn: Connection, use_cmd: bool = False + ) -> tuple[int, bytes, int]: + """Get a query message, possibly setting the secondaryOk bit.""" + # Use the read_preference decided by _socket_from_server. + self.read_preference = read_preference + if read_preference.mode: + # Set the secondaryOk bit. flags = self.flags | 4 else: flags = self.flags @@ -330,47 +422,79 @@ def get_message(self, set_slave_ok, sock_info, use_cmd=False): spec = self.spec if use_cmd: - spec = self.as_command(sock_info)[0] - if sock_info.op_msg_enabled: - request_id, msg, size, _ = _op_msg( - 0, spec, self.db, self.read_preference, - set_slave_ok, False, self.codec_options, - ctx=sock_info.compression_context) - return request_id, msg, size - ns = _UJOIN % (self.db, "$cmd") - ntoreturn = -1 # All DB commands return 1 document - else: - # OP_QUERY treats ntoreturn of -1 and 1 the same, return - # one document and close the cursor. We have to use 2 for - # batch size if 1 is specified. - ntoreturn = self.batch_size == 1 and 2 or self.batch_size - if self.limit: - if ntoreturn: - ntoreturn = min(self.limit, ntoreturn) - else: - ntoreturn = self.limit + spec = self.as_command(conn, apply_timeout=True)[0] + request_id, msg, size, _ = _op_msg( + 0, + spec, + self.db, + read_preference, + self.codec_options, + ctx=conn.compression_context, + ) + return request_id, msg, size + + # OP_QUERY treats ntoreturn of -1 and 1 the same, return + # one document and close the cursor. We have to use 2 for + # batch size if 1 is specified. 
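        # Concrete cases of the rules below: batch_size=1 becomes ntoreturn=2
        # so the server does not also close the cursor after one document;
        # limit=5 with batch_size=10 caps ntoreturn at min(5, 10) = 5.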
+ ntoreturn = self.batch_size == 1 and 2 or self.batch_size + if self.limit: + if ntoreturn: + ntoreturn = min(self.limit, ntoreturn) + else: + ntoreturn = self.limit - if sock_info.is_mongos: - spec = _maybe_add_read_preference(spec, - self.read_preference) + if conn.is_mongos: + assert isinstance(spec, MutableMapping) + spec = _maybe_add_read_preference(spec, read_preference) - return query(flags, ns, self.ntoskip, ntoreturn, - spec, None if use_cmd else self.fields, - self.codec_options, ctx=sock_info.compression_context) + return _query( + flags, + ns, + self.ntoskip, + ntoreturn, + spec, + None if use_cmd else self.fields, + self.codec_options, + ctx=conn.compression_context, + ) -class _GetMore(object): +class _GetMore: """A getmore operation.""" - __slots__ = ('db', 'coll', 'ntoreturn', 'cursor_id', 'max_await_time_ms', - 'codec_options', 'read_preference', 'session', 'client', - 'exhaust_mgr', '_as_command') - - name = 'getMore' - - def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, - read_preference, session, client, max_await_time_ms, - exhaust_mgr): + __slots__ = ( + "db", + "coll", + "ntoreturn", + "cursor_id", + "max_await_time_ms", + "codec_options", + "read_preference", + "session", + "client", + "conn_mgr", + "_as_command", + "exhaust", + "comment", + ) + + name = "getMore" + + def __init__( + self, + db: str, + coll: str, + ntoreturn: int, + cursor_id: int, + codec_options: CodecOptions, + read_preference: _ServerMode, + session: Optional[ClientSession], + client: MongoClient, + max_await_time_ms: Optional[int], + conn_mgr: Any, + exhaust: bool, + comment: Any, + ): self.db = db self.coll = coll self.ntoreturn = ntoreturn @@ -380,143 +504,159 @@ def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, self.session = session self.client = client self.max_await_time_ms = max_await_time_ms - self.exhaust_mgr = exhaust_mgr + self.conn_mgr = conn_mgr + self._as_command: Optional[tuple[SON[str, Any], str]] = None + self.exhaust = exhaust + self.comment = comment + + def reset(self) -> None: self._as_command = None - def namespace(self): - return _UJOIN % (self.db, self.coll) + def namespace(self) -> str: + return f"{self.db}.{self.coll}" + + def use_command(self, conn: Connection) -> bool: + use_cmd = False + if not self.exhaust: + use_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_cmd = True - def use_command(self, sock_info, exhaust): - sock_info.validate_session(self.client, self.session) - return sock_info.max_wire_version >= 4 and not exhaust + conn.validate_session(self.client, self.session) + return use_cmd - def as_command(self, sock_info): + def as_command( + self, conn: Connection, apply_timeout: bool = False + ) -> tuple[SON[str, Any], str]: """Return a getMore command document for this query.""" # See _Query.as_command for an explanation of this caching. 
if self._as_command is not None: return self._as_command - cmd = _gen_get_more_command(self.cursor_id, self.coll, - self.ntoreturn, - self.max_await_time_ms) - + cmd: SON[str, Any] = _gen_get_more_command( + self.cursor_id, + self.coll, + self.ntoreturn, + self.max_await_time_ms, + self.comment, + conn, + ) if self.session: - self.session._apply_to(cmd, False, self.read_preference) - sock_info.send_cluster_time(cmd, self.session, self.client) + self.session._apply_to(cmd, False, self.read_preference, conn) + conn.add_server_api(cmd) + conn.send_cluster_time(cmd, self.session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = cast(SON[str, Any], client._encrypter.encrypt(self.db, cmd, self.codec_options)) + # Support CSOT + if apply_timeout: + conn.apply_timeout(client, cmd=None) self._as_command = cmd, self.db return self._as_command - def get_message(self, dummy0, sock_info, use_cmd=False): + def get_message( + self, dummy0: Any, conn: Connection, use_cmd: bool = False + ) -> Union[tuple[int, bytes, int], tuple[int, bytes]]: """Get a getmore message.""" - ns = self.namespace() - ctx = sock_info.compression_context + ctx = conn.compression_context if use_cmd: - spec = self.as_command(sock_info)[0] - if sock_info.op_msg_enabled: - request_id, msg, size, _ = _op_msg( - 0, spec, self.db, None, - False, False, self.codec_options, - ctx=sock_info.compression_context) - return request_id, msg, size - ns = _UJOIN % (self.db, "$cmd") - return query(0, ns, 0, -1, spec, None, self.codec_options, ctx=ctx) + spec = self.as_command(conn, apply_timeout=True)[0] + if self.conn_mgr and self.exhaust: + flags = _OpMsg.EXHAUST_ALLOWED + else: + flags = 0 + request_id, msg, size, _ = _op_msg( + flags, spec, self.db, None, self.codec_options, ctx=conn.compression_context + ) + return request_id, msg, size - return get_more(ns, self.ntoreturn, self.cursor_id, ctx) + return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) -# TODO: Use OP_MSG once the server is able to respond with document streams. class _RawBatchQuery(_Query): - def use_command(self, socket_info, exhaust): + def use_command(self, conn: Connection) -> bool: # Compatibility checks. - super(_RawBatchQuery, self).use_command(socket_info, exhaust) - + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True return False - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - # Always pass False for use_cmd. - return super(_RawBatchQuery, self).get_message( - set_slave_ok, sock_info, False) - class _RawBatchGetMore(_GetMore): - def use_command(self, socket_info, exhaust): + def use_command(self, conn: Connection) -> bool: + # Compatibility checks. + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True return False - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - # Always pass False for use_cmd. 
- return super(_RawBatchGetMore, self).get_message( - set_slave_ok, sock_info, False) - class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" - def __new__(cls, address, namespace): + __namespace: Any + + def __new__(cls, address: _Address, namespace: str) -> _CursorAddress: self = tuple.__new__(cls, address) self.__namespace = namespace return self @property - def namespace(self): + def namespace(self) -> str: """The namespace of this cursor.""" return self.__namespace - def __hash__(self): + def __hash__(self) -> int: # Two _CursorAddress instances with different namespaces # must not hash the same. - return (self + (self.__namespace,)).__hash__() + return ((*self, self.__namespace)).__hash__() - def __eq__(self, other): + def __eq__(self, other: object) -> bool: if isinstance(other, _CursorAddress): - return (tuple(self) == tuple(other) - and self.namespace == other.namespace) + return tuple(self) == tuple(other) and self.namespace == other.namespace return NotImplemented - def __ne__(self, other): + def __ne__(self, other: object) -> bool: return not self == other _pack_compression_header = struct.Struct(" tuple[int, bytes]: """Takes message data, compresses it, and adds an OP_COMPRESSED header.""" compressed = ctx.compress(data) request_id = _randint() header = _pack_compression_header( - _COMPRESSION_HEADER_SIZE + len(compressed), # Total message length - request_id, # Request id - 0, # responseTo - 2012, # operation id - operation, # original operation id - len(data), # uncompressed message length - ctx.compressor_id) # compressor id + _COMPRESSION_HEADER_SIZE + len(compressed), # Total message length + request_id, # Request id + 0, # responseTo + 2012, # operation id + operation, # original operation id + len(data), # uncompressed message length + ctx.compressor_id, + ) # compressor id return request_id, header + compressed -def __last_error(namespace, args): - """Data to send to do a lastError. - """ - cmd = SON([("getlasterror", 1)]) - cmd.update(args) - splitns = namespace.split('.', 1) - return query(0, splitns[0] + '.$cmd', 0, -1, cmd, - None, DEFAULT_CODEC_OPTIONS) - - _pack_header = struct.Struct(" tuple[int, bytes]: """Takes message data and adds a message header based on the operation. Returns the resultant message string. @@ -527,115 +667,17 @@ def __pack_message(operation, data): _pack_int = struct.Struct(" tuple[bytes, int, int]: """Get an OP_MSG message.
Note: this method handles multiple documents in a type one payload but @@ -647,377 +689,429 @@ def _op_msg_no_header(flags, command, identifier, docs, check_keys, opts): flags_type = _pack_op_msg_flags_type(flags, 0) total_size = len(encoded) max_doc_size = 0 - if identifier: + if identifier and docs is not None: type_one = _pack_byte(1) cstring = _make_c_string(identifier) - encoded_docs = [_dict_to_bson(doc, check_keys, opts) for doc in docs] + encoded_docs = [_dict_to_bson(doc, False, opts) for doc in docs] size = len(cstring) + sum(len(doc) for doc in encoded_docs) + 4 encoded_size = _pack_int(size) total_size += size max_doc_size = max(len(doc) for doc in encoded_docs) - data = ([flags_type, encoded, type_one, encoded_size, cstring] + - encoded_docs) + data = [flags_type, encoded, type_one, encoded_size, cstring, *encoded_docs] else: data = [flags_type, encoded] - return b''.join(data), total_size, max_doc_size + return b"".join(data), total_size, max_doc_size -def _op_msg_compressed(flags, command, identifier, docs, check_keys, opts, - ctx): +def _op_msg_compressed( + flags: int, + command: Mapping[str, Any], + identifier: str, + docs: Optional[list[Mapping[str, Any]]], + opts: CodecOptions, + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes, int, int]: """Internal compressed OP_MSG message helper.""" - msg, total_size, max_bson_size = _op_msg_no_header( - flags, command, identifier, docs, check_keys, opts) + msg, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts) rid, msg = _compress(2013, msg, ctx) return rid, msg, total_size, max_bson_size -def _op_msg_uncompressed(flags, command, identifier, docs, check_keys, opts): +def _op_msg_uncompressed( + flags: int, + command: Mapping[str, Any], + identifier: str, + docs: Optional[list[Mapping[str, Any]]], + opts: CodecOptions, +) -> tuple[int, bytes, int, int]: """Internal OP_MSG message helper.""" - data, total_size, max_bson_size = _op_msg_no_header( - flags, command, identifier, docs, check_keys, opts) + data, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts) request_id, op_message = __pack_message(2013, data) return request_id, op_message, total_size, max_bson_size + + if _use_c: _op_msg_uncompressed = _cmessage._op_msg -def _op_msg(flags, command, dbname, read_preference, slave_ok, check_keys, - opts, ctx=None): +def _op_msg( + flags: int, + command: MutableMapping[str, Any], + dbname: str, + read_preference: Optional[_ServerMode], + opts: CodecOptions, + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes, int, int]: """Get an OP_MSG message.""" - command['$db'] = dbname + command["$db"] = dbname # getMore commands do not send $readPreference. if read_preference is not None and "$readPreference" not in command: - if slave_ok and not read_preference.mode: - command["$readPreference"] = ( - ReadPreference.PRIMARY_PREFERRED.document) - else: + # Only send $readPreference if it's not primary (the default).
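    # Illustration: ReadPreference.PRIMARY has mode 0, so nothing is added
    # here; Secondary(tag_sets=[{"dc": "ny"}]) serializes to
    # {"mode": "secondary", "tags": [{"dc": "ny"}]}.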
+ if read_preference.mode: command["$readPreference"] = read_preference.document name = next(iter(command)) try: - identifier = _FIELD_MAP.get(name) + identifier = _FIELD_MAP[name] docs = command.pop(identifier) except KeyError: identifier = "" docs = None try: if ctx: - return _op_msg_compressed( - flags, command, identifier, docs, check_keys, opts, ctx) - return _op_msg_uncompressed( - flags, command, identifier, docs, check_keys, opts) + return _op_msg_compressed(flags, command, identifier, docs, opts, ctx) + return _op_msg_uncompressed(flags, command, identifier, docs, opts) finally: # Add the field back to the command. if identifier: command[identifier] = docs -def _query(options, collection_name, num_to_skip, - num_to_return, query, field_selector, opts, check_keys): +def _query_impl( + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions, +) -> tuple[bytes, int]: """Get an OP_QUERY message.""" - encoded = _dict_to_bson(query, check_keys, opts) + encoded = _dict_to_bson(query, False, opts) if field_selector: efs = _dict_to_bson(field_selector, False, opts) else: efs = b"" max_bson_size = max(len(encoded), len(efs)) - return b"".join([ - _pack_int(options), - _make_c_string(collection_name), - _pack_int(num_to_skip), - _pack_int(num_to_return), - encoded, - efs]), max_bson_size - - -def _query_compressed(options, collection_name, num_to_skip, - num_to_return, query, field_selector, - opts, check_keys=False, ctx=None): + return ( + b"".join( + [ + _pack_int(options), + _make_c_string(collection_name), + _pack_int(num_to_skip), + _pack_int(num_to_return), + encoded, + efs, + ] + ), + max_bson_size, + ) + + +def _query_compressed( + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions, + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes, int]: """Internal compressed query message helper.""" - op_query, max_bson_size = _query( - options, - collection_name, - num_to_skip, - num_to_return, - query, - field_selector, - opts, - check_keys) + op_query, max_bson_size = _query_impl( + options, collection_name, num_to_skip, num_to_return, query, field_selector, opts + ) rid, msg = _compress(2004, op_query, ctx) return rid, msg, max_bson_size -def _query_uncompressed(options, collection_name, num_to_skip, - num_to_return, query, field_selector, opts, check_keys=False): +def _query_uncompressed( + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions, +) -> tuple[int, bytes, int]: """Internal query message helper.""" - op_query, max_bson_size = _query( - options, - collection_name, - num_to_skip, - num_to_return, - query, - field_selector, - opts, - check_keys) + op_query, max_bson_size = _query_impl( + options, collection_name, num_to_skip, num_to_return, query, field_selector, opts + ) rid, msg = __pack_message(2004, op_query) return rid, msg, max_bson_size + + if _use_c: _query_uncompressed = _cmessage._query_message -def query(options, collection_name, num_to_skip, num_to_return, - query, field_selector, opts, check_keys=False, ctx=None): +def _query( + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + 
opts: CodecOptions, + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes, int]: """Get a **query** message.""" if ctx: - return _query_compressed(options, collection_name, num_to_skip, - num_to_return, query, field_selector, - opts, check_keys, ctx) - return _query_uncompressed(options, collection_name, num_to_skip, - num_to_return, query, field_selector, opts, - check_keys) + return _query_compressed( + options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx + ) + return _query_uncompressed( + options, collection_name, num_to_skip, num_to_return, query, field_selector, opts + ) _pack_long_long = struct.Struct(" bytes: """Get an OP_GET_MORE message.""" - return b"".join([ - _ZERO_32, - _make_c_string(collection_name), - _pack_int(num_to_return), - _pack_long_long(cursor_id)]) - - -def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx): + return b"".join( + [ + _ZERO_32, + _make_c_string(collection_name), + _pack_int(num_to_return), + _pack_long_long(cursor_id), + ] + ) + + +def _get_more_compressed( + collection_name: str, + num_to_return: int, + cursor_id: int, + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes]: """Internal compressed getMore message helper.""" - return _compress( - 2005, _get_more(collection_name, num_to_return, cursor_id), ctx) + return _compress(2005, _get_more_impl(collection_name, num_to_return, cursor_id), ctx) -def _get_more_uncompressed(collection_name, num_to_return, cursor_id): +def _get_more_uncompressed( + collection_name: str, num_to_return: int, cursor_id: int +) -> tuple[int, bytes]: """Internal getMore message helper.""" - return __pack_message( - 2005, _get_more(collection_name, num_to_return, cursor_id)) + return __pack_message(2005, _get_more_impl(collection_name, num_to_return, cursor_id)) + + if _use_c: _get_more_uncompressed = _cmessage._get_more_message -def get_more(collection_name, num_to_return, cursor_id, ctx=None): +def _get_more( + collection_name: str, + num_to_return: int, + cursor_id: int, + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes]: """Get a **getMore** message.""" if ctx: - return _get_more_compressed( - collection_name, num_to_return, cursor_id, ctx) + return _get_more_compressed(collection_name, num_to_return, cursor_id, ctx) return _get_more_uncompressed(collection_name, num_to_return, cursor_id) -def _delete(collection_name, spec, opts, flags): - """Get an OP_DELETE message.""" - encoded = _dict_to_bson(spec, False, opts) # Uses extensions. - return b"".join([ - _ZERO_32, - _make_c_string(collection_name), - _pack_int(flags), - encoded]), len(encoded) - - -def _delete_compressed(collection_name, spec, opts, flags, ctx): - """Internal compressed unacknowledged delete message helper.""" - op_delete, max_bson_size = _delete(collection_name, spec, opts, flags) - rid, msg = _compress(2006, op_delete, ctx) - return rid, msg, max_bson_size - - -def _delete_uncompressed( - collection_name, spec, safe, last_error_args, opts, flags=0): - """Internal delete message helper.""" - op_delete, max_bson_size = _delete(collection_name, spec, opts, flags) - rid, msg = __pack_message(2006, op_delete) - if safe: - rid, gle, _ = __last_error(collection_name, last_error_args) - return rid, msg + gle, max_bson_size - return rid, msg, max_bson_size - - -def delete( - collection_name, spec, safe, last_error_args, opts, flags=0, ctx=None): - """Get a **delete** message. - - `opts` is a CodecOptions. 
`flags` is a bit vector that may contain - the SingleRemove flag or not: - - http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#op-delete - """ - if ctx: - return _delete_compressed(collection_name, spec, opts, flags, ctx) - return _delete_uncompressed( - collection_name, spec, safe, last_error_args, opts, flags) - - -def kill_cursors(cursor_ids): - """Get a **killCursors** message. - """ - num_cursors = len(cursor_ids) - pack = struct.Struct("<ii" + ("q" * num_cursors)).pack - op_kill_cursors = pack(0, num_cursors, *cursor_ids) - return __pack_message(2007, op_kill_cursors) -class _BulkWriteContext(object): +class _BulkWriteContext: - """A wrapper around SocketInfo for use with write splitting functions.""" + """A wrapper around Connection for use with write splitting functions.""" - def _batch_command(self, docs): - namespace = self.db_name + '.$cmd' - request_id, msg, to_send = _do_batched_op_msg( - namespace, self.op_type, self.command, docs, self.check_keys, - self.codec, self) + def __batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, bytes, list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" + request_id, msg, to_send = _do_batched_op_msg( + namespace, self.op_type, cmd, docs, self.codec, self + ) if not to_send: raise InvalidOperation("cannot do an empty bulk write") return request_id, msg, to_send - def execute(self, docs, client): - request_id, msg, to_send = self._batch_command(docs) - result = self.write_command(request_id, msg, to_send) + def execute( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]], client: MongoClient + ) -> tuple[Mapping[str, Any], list[Mapping[str, Any]]]: + request_id, msg, to_send = self.__batch_command(cmd, docs) + result = self.write_command(cmd, request_id, msg, to_send) client._process_response(result, self.session) return result, to_send - def execute_unack(self, docs, client): - request_id, msg, to_send = self._batch_command(docs) + def execute_unack( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]], client: MongoClient + ) -> list[Mapping[str, Any]]: + request_id, msg, to_send = self.__batch_command(cmd, docs) # Though this isn't strictly a "legacy" write, the helper # handles publishing commands and sending our message # without receiving a result. Send 0 for max_doc_size # to disable size checking. Size checking is handled while # the documents are encoded to BSON. - self.legacy_write(request_id, msg, 0, False, to_send) + self.unack_write(cmd, request_id, msg, 0, to_send) return to_send @property - def check_keys(self): - """Should we check keys for this operation type?""" - return self.op_type == _INSERT - - @property - def max_bson_size(self): + def max_bson_size(self) -> int: - """A proxy for SockInfo.max_bson_size.""" + """A proxy for Connection.max_bson_size.""" - return self.sock_info.max_bson_size + return self.conn.max_bson_size @property - def max_message_size(self): + def max_message_size(self) -> int: - """A proxy for SockInfo.max_message_size.""" + """A proxy for Connection.max_message_size.""" if self.compress: # Subtract 16 bytes for the message header. - return self.sock_info.max_message_size - 16 - return self.sock_info.max_message_size + return self.conn.max_message_size - 16 + return self.conn.max_message_size @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: - """A proxy for SockInfo.max_write_batch_size.""" + """A proxy for Connection.max_write_batch_size.""" - return self.sock_info.max_write_batch_size + return self.conn.max_write_batch_size @property - def max_split_size(self): + def max_split_size(self) -> int: """The maximum size of a BSON command before batch splitting.""" return self.max_bson_size - def legacy_bulk_insert( - self, request_id, msg, max_doc_size, acknowledged, docs, compress): - if compress: - request_id, msg = _compress( - 2002, msg, self.sock_info.compression_context) - return self.legacy_write( - request_id, msg, max_doc_size, acknowledged, docs) - - def legacy_write(self, request_id, msg, max_doc_size, acknowledged, docs): - """A proxy for SocketInfo.legacy_write that handles event publishing.
- """ + def unack_write( + self, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + max_doc_size: int, + docs: list[Mapping[str, Any]], + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time - cmd = self._start(request_id, docs) + cmd = self._start(cmd, request_id, docs) start = datetime.datetime.now() try: - result = self.sock_info.legacy_write( - request_id, msg, max_doc_size, acknowledged) + result = self.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value] if self.publish: duration = (datetime.datetime.now() - start) + duration if result is not None: reply = _convert_write_result(self.name, cmd, result) else: # Comply with APM spec. - reply = {'ok': 1} + reply = {"ok": 1} self._succeed(request_id, reply, duration) - except OperationFailure as exc: + except Exception as exc: if self.publish: + assert self.start_time is not None duration = (datetime.datetime.now() - start) + duration - self._fail( - request_id, - _convert_write_result( - self.name, cmd, exc.details), - duration) + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(self.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + self._fail(request_id, failure, duration) raise finally: self.start_time = datetime.datetime.now() return result - def write_command(self, request_id, msg, docs): - """A proxy for SocketInfo.write_command that handles event publishing. - """ + @_handle_reauth + def write_command( + self, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + docs: list[Mapping[str, Any]], + ) -> dict[str, Any]: + """A proxy for SocketInfo.write_command that handles event publishing.""" if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time - self._start(request_id, docs) + self._start(cmd, request_id, docs) start = datetime.datetime.now() try: - reply = self.sock_info.write_command(request_id, msg) + reply = self.conn.write_command(request_id, msg, self.codec) if self.publish: duration = (datetime.datetime.now() - start) + duration self._succeed(request_id, reply, duration) - except OperationFailure as exc: + except Exception as exc: if self.publish: duration = (datetime.datetime.now() - start) + duration - self._fail(request_id, exc.details, duration) + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + self._fail(request_id, failure, duration) raise finally: self.start_time = datetime.datetime.now() return reply - def _start(self, request_id, docs): + def _start( + self, cmd: MutableMapping[str, Any], request_id: int, docs: list[Mapping[str, Any]] + ) -> MutableMapping[str, Any]: """Publish a CommandStartedEvent.""" - cmd = self.command.copy() cmd[self.field] = docs self.listeners.publish_command_start( - cmd, self.db_name, - request_id, self.sock_info.address, self.op_id) + cmd, + self.db_name, + request_id, + self.conn.address, + self.op_id, + self.conn.service_id, + ) return cmd - def _succeed(self, request_id, reply, duration): + def _succeed(self, request_id: int, reply: _DocumentOut, duration: timedelta) -> None: """Publish a CommandSucceededEvent.""" 
self.listeners.publish_command_success( - duration, reply, self.name, - request_id, self.sock_info.address, self.op_id) - - def _fail(self, request_id, failure, duration): + duration, + reply, + self.name, + request_id, + self.conn.address, + self.op_id, + self.conn.service_id, + database_name=self.db_name, + ) + + def _fail(self, request_id: int, failure: _DocumentOut, duration: timedelta) -> None: """Publish a CommandFailedEvent.""" self.listeners.publish_command_failure( - duration, failure, self.name, - request_id, self.sock_info.address, self.op_id) + duration, + failure, + self.name, + request_id, + self.conn.address, + self.op_id, + self.conn.service_id, + database_name=self.db_name, + ) # From the Client Side Encryption spec: @@ -1030,150 +1124,83 @@ def _fail(self, request_id, failure, duration): class _EncryptedBulkWriteContext(_BulkWriteContext): __slots__ = () - def _batch_command(self, docs): - namespace = self.db_name + '.$cmd' + def __batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[MutableMapping[str, Any], list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" msg, to_send = _encode_batched_write_command( - namespace, self.op_type, self.command, docs, self.check_keys, - self.codec, self) + namespace, self.op_type, cmd, docs, self.codec, self + ) if not to_send: raise InvalidOperation("cannot do an empty bulk write") # Chop off the OP_QUERY header to get a properly batched write command. cmd_start = msg.index(b"\x00", 4) + 9 - cmd = _inflate_bson(memoryview(msg)[cmd_start:], - DEFAULT_RAW_BSON_OPTIONS) - return cmd, to_send - - def execute(self, docs, client): - cmd, to_send = self._batch_command(docs) - result = self.sock_info.command( - self.db_name, cmd, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - session=self.session, client=client) + outgoing = _inflate_bson(memoryview(msg)[cmd_start:], DEFAULT_RAW_BSON_OPTIONS) + return outgoing, to_send + + def execute( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]], client: MongoClient + ) -> tuple[Mapping[str, Any], list[Mapping[str, Any]]]: + batched_cmd, to_send = self.__batch_command(cmd, docs) + result: Mapping[str, Any] = self.conn.command( + self.db_name, batched_cmd, codec_options=self.codec, session=self.session, client=client + ) return result, to_send - def execute_unack(self, docs, client): - cmd, to_send = self._batch_command(docs) - self.sock_info.command( - self.db_name, cmd, write_concern=WriteConcern(w=0), - session=self.session, client=client) + def execute_unack( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]], client: MongoClient + ) -> list[Mapping[str, Any]]: + batched_cmd, to_send = self.__batch_command(cmd, docs) + self.conn.command( + self.db_name, + batched_cmd, + write_concern=WriteConcern(w=0), + session=self.session, + client=client, + ) return to_send @property - def max_split_size(self): + def max_split_size(self) -> int: """Reduce the batch splitting size.""" return _MAX_SPLIT_SIZE_ENC -def _raise_document_too_large(operation, doc_size, max_size): +def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> NoReturn: """Internal helper for raising DocumentTooLarge.""" if operation == "insert": - raise DocumentTooLarge("BSON document too large (%d bytes)" - " - the connected server supports" - " BSON document sizes up to %d" - " bytes." 
% (doc_size, max_size)) + raise DocumentTooLarge( + "BSON document too large (%d bytes)" + " - the connected server supports" + " BSON document sizes up to %d" + " bytes." % (doc_size, max_size) + ) else: # There's nothing intelligent we can say # about size for update and delete - raise DocumentTooLarge("%r command document too large" % (operation,)) - - -def _do_batched_insert(collection_name, docs, check_keys, - safe, last_error_args, continue_on_error, opts, - ctx): - """Insert `docs` using multiple batches. - """ - def _insert_message(insert_message, send_safe): - """Build the insert message with header and GLE. - """ - request_id, final_message = __pack_message(2002, insert_message) - if send_safe: - request_id, error_message, _ = __last_error(collection_name, - last_error_args) - final_message += error_message - return request_id, final_message - - send_safe = safe or not continue_on_error - last_error = None - data = StringIO() - data.write(struct.pack("<i", int(continue_on_error))) - data.write(_make_c_string(collection_name)) - message_length = begin_loc = data.tell() - has_docs = False - to_send = [] - encoded_length = 0 - compress = ctx.compress and not (safe or send_safe) - for doc in docs: - encoded = _dict_to_bson(doc, check_keys, opts) - encoded_length = len(encoded) - too_large = (encoded_length > ctx.max_bson_size) - - message_length += encoded_length - if message_length < ctx.max_message_size and not too_large: - data.write(encoded) - to_send.append(doc) - has_docs = True - continue - - if has_docs: - # We have enough data, send this message. - try: - if compress: - rid, msg = None, data.getvalue() - else: - rid, msg = _insert_message(data.getvalue(), send_safe) - ctx.legacy_bulk_insert( - rid, msg, 0, send_safe, to_send, compress) - # Exception type could be OperationFailure or a subtype - # (e.g. DuplicateKeyError) - except OperationFailure as exc: - # Like it says, continue on error... - if continue_on_error: - # Store exception details to re-raise after the final batch. - last_error = exc - # With unacknowledged writes just return at the first error. - elif not safe: - return - # With acknowledged writes raise immediately.
- else: - raise - - if too_large: - _raise_document_too_large( - "insert", encoded_length, ctx.max_bson_size) - - message_length = begin_loc + encoded_length - data.seek(begin_loc) - data.truncate() - data.write(encoded) - to_send = [doc] + raise DocumentTooLarge(f"{operation!r} command document too large") - if not has_docs: - raise InvalidOperation("cannot do an empty bulk insert") - - if compress: - request_id, msg = None, data.getvalue() - else: - request_id, msg = _insert_message(data.getvalue(), safe) - ctx.legacy_bulk_insert(request_id, msg, 0, safe, to_send, compress) - - # Re-raise any exception stored due to continue_on_error - if last_error is not None: - raise last_error -if _use_c: - _do_batched_insert = _cmessage._do_batched_insert # OP_MSG ------------------------------------------------------------- _OP_MSG_MAP = { - _INSERT: b'documents\x00', - _UPDATE: b'updates\x00', - _DELETE: b'deletes\x00', + _INSERT: b"documents\x00", + _UPDATE: b"updates\x00", + _DELETE: b"deletes\x00", } def _batched_op_msg_impl( - operation, command, docs, check_keys, ack, opts, ctx, buf): + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions, + ctx: _BulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], int]: """Create a batched OP_MSG write.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1195,30 +1222,26 @@ def _batched_op_msg_impl( try: buf.write(_OP_MSG_MAP[operation]) except KeyError: - raise InvalidOperation('Unknown command') - - if operation in (_UPDATE, _DELETE): - check_keys = False + raise InvalidOperation("Unknown command") from None to_send = [] idx = 0 for doc in docs: # Encode the current operation - value = _dict_to_bson(doc, check_keys, opts) + value = _dict_to_bson(doc, False, opts) doc_length = len(value) new_message_size = buf.tell() + doc_length # Does first document exceed max_message_size? - doc_too_large = (idx == 0 and (new_message_size > max_message_size)) - # When OP_MSG is used unacknowleged we have to check + doc_too_large = idx == 0 and (new_message_size > max_message_size) + # When OP_MSG is used unacknowledged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of # halting the bulk write operation. - unacked_doc_too_large = (not ack and (doc_length > max_bson_size)) + unacked_doc_too_large = not ack and (doc_length > max_bson_size) if doc_too_large or unacked_doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) + _raise_document_too_large(write_op, len(value), max_bson_size) # We have enough data, return this batch. if new_message_size > max_message_size: break @@ -1238,46 +1261,61 @@ def _batched_op_msg_impl( def _encode_batched_op_msg( - operation, command, docs, check_keys, ack, opts, ctx): + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions, + ctx: _BulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]]]: """Encode the next batched insert, update, or delete operation as OP_MSG. 
""" - buf = StringIO() + buf = _BytesIO() - to_send, _ = _batched_op_msg_impl( - operation, command, docs, check_keys, ack, opts, ctx, buf) + to_send, _ = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) return buf.getvalue(), to_send + + if _use_c: _encode_batched_op_msg = _cmessage._encode_batched_op_msg def _batched_op_msg_compressed( - operation, command, docs, check_keys, ack, opts, ctx): + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions, + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation with OP_MSG, compressed. """ - data, to_send = _encode_batched_op_msg( - operation, command, docs, check_keys, ack, opts, ctx) + data, to_send = _encode_batched_op_msg(operation, command, docs, ack, opts, ctx) - request_id, msg = _compress( - 2013, - data, - ctx.sock_info.compression_context) + assert ctx.conn.compression_context is not None + request_id, msg = _compress(2013, data, ctx.conn.compression_context) return request_id, msg, to_send def _batched_op_msg( - operation, command, docs, check_keys, ack, opts, ctx): + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions, + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """OP_MSG implementation entry point.""" - buf = StringIO() + buf = _BytesIO() # Save space for message length and request id buf.write(_ZERO_64) # responseTo, opCode buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") - to_send, length = _batched_op_msg_impl( - operation, command, docs, check_keys, ack, opts, ctx, buf) + to_send, length = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) # Header - request id and message length buf.seek(4) @@ -1287,106 +1325,64 @@ def _batched_op_msg( buf.write(_pack_int(length)) return request_id, buf.getvalue(), to_send + + if _use_c: _batched_op_msg = _cmessage._batched_op_msg def _do_batched_op_msg( - namespace, operation, command, docs, check_keys, opts, ctx): + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions, + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation using OP_MSG. """ - command['$db'] = namespace.split('.', 1)[0] - if 'writeConcern' in command: - ack = bool(command['writeConcern'].get('w', 1)) + command["$db"] = namespace.split(".", 1)[0] + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) else: ack = True - if ctx.sock_info.compression_context: - return _batched_op_msg_compressed( - operation, command, docs, check_keys, ack, opts, ctx) - return _batched_op_msg( - operation, command, docs, check_keys, ack, opts, ctx) + if ctx.conn.compression_context: + return _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx) + return _batched_op_msg(operation, command, docs, ack, opts, ctx) # End OP_MSG ----------------------------------------------------- -def _batched_write_command_compressed( - namespace, operation, command, docs, check_keys, opts, ctx): - """Create the next batched insert, update, or delete command, compressed. 
- """ - data, to_send = _encode_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) - - request_id, msg = _compress( - 2004, - data, - ctx.sock_info.compression_context) - return request_id, msg, to_send - - def _encode_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Encode the next batched insert, update, or delete command. - """ - buf = StringIO() - - to_send, _ = _batched_write_command_impl( - namespace, operation, command, docs, check_keys, opts, ctx, buf) + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions, + ctx: _BulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]]]: + """Encode the next batched insert, update, or delete command.""" + buf = _BytesIO() + + to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) return buf.getvalue(), to_send -if _use_c: - _encode_batched_write_command = _cmessage._encode_batched_write_command -def _batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Create the next batched insert, update, or delete command. - """ - buf = StringIO() - - # Save space for message length and request id - buf.write(_ZERO_64) - # responseTo, opCode - buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00") - - # Write OP_QUERY write command - to_send, length = _batched_write_command_impl( - namespace, operation, command, docs, check_keys, opts, ctx, buf) - - # Header - request id and message length - buf.seek(4) - request_id = _randint() - buf.write(_pack_int(request_id)) - buf.seek(0) - buf.write(_pack_int(length)) - - return request_id, buf.getvalue(), to_send if _use_c: - _batched_write_command = _cmessage._batched_write_command - - -def _do_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Batched write commands entry point.""" - if ctx.sock_info.compression_context: - return _batched_write_command_compressed( - namespace, operation, command, docs, check_keys, opts, ctx) - return _batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) - - -def _do_bulk_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Bulk write commands entry point.""" - if ctx.sock_info.max_wire_version > 5: - return _do_batched_op_msg( - namespace, operation, command, docs, check_keys, opts, ctx) - return _do_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) + _encode_batched_write_command = _cmessage._encode_batched_write_command def _batched_write_command_impl( - namespace, operation, command, docs, check_keys, opts, ctx, buf): + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions, + ctx: _BulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], int]: """Create a batched OP_QUERY write command.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1398,7 +1394,7 @@ def _batched_write_command_impl( # No options buf.write(_ZERO_32) # Namespace as C string - buf.write(b(namespace)) + buf.write(namespace.encode("utf8")) buf.write(_ZERO_8) # Skip: 0, Limit: -1 buf.write(_SKIPLIM) @@ -1414,10 +1410,7 @@ def _batched_write_command_impl( try: buf.write(_OP_MAP[operation]) except KeyError: - raise InvalidOperation('Unknown command') - - if operation in (_UPDATE, _DELETE): - check_keys = False + raise InvalidOperation("Unknown 
command") from None # Where to write list document length list_start = buf.tell() - 4 @@ -1425,18 +1418,16 @@ def _batched_write_command_impl( idx = 0 for doc in docs: # Encode the current operation - key = b(str(idx)) - value = encode(doc, check_keys, opts) + key = str(idx).encode("utf8") + value = _dict_to_bson(doc, False, opts) # Is there enough room to add this document? max_cmd_size accounts for # the two trailing null bytes. doc_too_large = len(value) > max_cmd_size if doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) - enough_data = (idx >= 1 and - (buf.tell() + len(key) + len(value)) >= max_split_size) - enough_documents = (idx >= max_write_batch_size) + _raise_document_too_large(write_op, len(value), max_bson_size) + enough_data = idx >= 1 and (buf.tell() + len(key) + len(value)) >= max_split_size + enough_documents = idx >= max_write_batch_size if enough_data or enough_documents: break buf.write(_BSONOBJ) @@ -1460,7 +1451,7 @@ def _batched_write_command_impl( return to_send, length -class _OpReply(object): +class _OpReply: """A MongoDB OP_REPLY response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "documents") @@ -1468,18 +1459,20 @@ class _OpReply(object): UNPACK_FROM = struct.Struct(" list[bytes]: """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. - Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or + Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :Parameters: @@ -1498,30 +1491,38 @@ def raw_response(self, cursor_id=None): errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object = bson.BSON(self.documents).decode() + error_object: dict = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) - if error_object["$err"].startswith("not master"): - raise NotMasterError(error_object["$err"], error_object) + if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): + raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: - raise ExecutionTimeout(error_object.get("$err"), - error_object.get("code"), - error_object) - raise OperationFailure("database error: %s" % - error_object.get("$err"), - error_object.get("code"), - error_object) - return [self.documents] - - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + default_msg = "operation exceeded time limit" + raise ExecutionTimeout( + error_object.get("$err", default_msg), error_object.get("code"), error_object + ) + raise OperationFailure( + "database error: %s" % error_object.get("$err"), + error_object.get("code"), + error_object, + ) + if self.documents: + return [self.documents] + return [] + + def unpack_response( + self, + cursor_id: Optional[int] = None, + codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: """Unpack a response from the database and decode the BSON document(s). Check the response for errors and unpack, returning a dictionary containing the response data. - Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or + Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. 
:Parameters: @@ -1498,30 +1491,38 @@ def raw_response(self, cursor_id=None): errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object = bson.BSON(self.documents).decode() + error_object: dict = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) - if error_object["$err"].startswith("not master"): - raise NotMasterError(error_object["$err"], error_object) + if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): + raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: - raise ExecutionTimeout(error_object.get("$err"), - error_object.get("code"), - error_object) - raise OperationFailure("database error: %s" % - error_object.get("$err"), - error_object.get("code"), - error_object) - return [self.documents] - - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + default_msg = "operation exceeded time limit" + raise ExecutionTimeout( + error_object.get("$err", default_msg), error_object.get("code"), error_object + ) + raise OperationFailure( + "database error: %s" % error_object.get("$err"), + error_object.get("code"), + error_object, + ) + if self.documents: + return [self.documents] + return [] + + def unpack_response( + self, + cursor_id: Optional[int] = None, + codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: """Unpack a response from the database and decode the BSON document(s). Check the response for errors and unpack, returning a dictionary containing the response data. Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :Parameters: @@ -1530,37 +1531,42 @@ def unpack_response(self, cursor_id=None, valid at server response - `codec_options` (optional): an instance of :class:`~bson.codec_options.CodecOptions` + - `user_fields` (optional): Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. """ self.raw_response(cursor_id) if legacy_response: return bson.decode_all(self.documents, codec_options) - return bson._decode_all_selective( - self.documents, codec_options, user_fields) + return bson._decode_all_selective(self.documents, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options: CodecOptions) -> dict[str, Any]: """Unpack a command response.""" - docs = self.unpack_response() + docs = self.unpack_response(codec_options=codec_options) assert self.number_returned == 1 return docs[0] - def raw_command_response(self): + def raw_command_response(self) -> NoReturn: """Return the bytes of the command response.""" # This should never be called on _OpReply. raise NotImplementedError + @property + def more_to_come(self) -> bool: + """Is the moreToCome bit set on this response?""" + return False + @classmethod - def unpack(cls, msg): + def unpack(cls, msg: bytes) -> _OpReply: """Construct an _OpReply from raw bytes.""" # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) - # Convert Python 3 memoryview to bytes. Note we should call - # memoryview.tobytes() if we start using memoryview in Python 2.7. - documents = bytes(msg[20:]) + documents = msg[20:] return cls(flags, cursor_id, number_returned, documents) -class _OpMsg(object): +class _OpMsg: """A MongoDB OP_MSG response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "payload_document") @@ -1568,113 +1574,84 @@ class _OpMsg(object): UNPACK_FROM = struct.Struct("<IBi").unpack_from OP_CODE = 2013 + # Flag bits. + CHECKSUM_PRESENT = 1 + MORE_TO_COME = 1 << 1 + EXHAUST_ALLOWED = 1 << 16 # Only for requests. - def __init__(self, flags, payload_document): + def __init__(self, flags: int, payload_document: bytes): self.flags = flags self.payload_document = payload_document - def raw_response(self, cursor_id=None, user_fields={}): - inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, - _RAW_ARRAY_BSON_OPTIONS) - return [inflated_response] + def raw_response( + self, + cursor_id: Optional[int] = None, + user_fields: Optional[Mapping[str, Any]] = {}, + ) -> list[Mapping[str, Any]]: + """ + cursor_id is ignored + user_fields is used to determine which fields must not be decoded + """ + inflated_response = _decode_selective( + RawBSONDocument(self.payload_document), user_fields, _RAW_ARRAY_BSON_OPTIONS + ) + return [inflated_response] + + def unpack_response( + self, + cursor_id: Optional[int] = None, + codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: """Unpack an OP_MSG command response. :Parameters: - `cursor_id` (optional): Ignored, for compatibility with _OpReply. - `codec_options` (optional): an instance of :class:`~bson.codec_options.CodecOptions` + - `user_fields` (optional): Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. """ # If _OpMsg is in-use, this cannot be a legacy response.
assert not legacy_response - return bson._decode_all_selective( - self.payload_document, codec_options, user_fields) + return bson._decode_all_selective(self.payload_document, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options: CodecOptions) -> dict[str, Any]: """Unpack a command response.""" - return self.unpack_response()[0] + return self.unpack_response(codec_options=codec_options)[0] - def raw_command_response(self): + def raw_command_response(self) -> bytes: """Return the bytes of the command response.""" return self.payload_document + @property + def more_to_come(self) -> bool: + """Is the moreToCome bit set on this response?""" + return bool(self.flags & self.MORE_TO_COME) + @classmethod - def unpack(cls, msg): + def unpack(cls, msg: bytes) -> _OpMsg: """Construct an _OpMsg from raw bytes.""" flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: - raise ProtocolError("Unsupported OP_MSG flags (%r)" % (flags,)) + if flags & cls.CHECKSUM_PRESENT: + raise ProtocolError(f"Unsupported OP_MSG flag checksumPresent: 0x{flags:x}") + + if flags ^ cls.MORE_TO_COME: + raise ProtocolError(f"Unsupported OP_MSG flags: 0x{flags:x}") if first_payload_type != 0: - raise ProtocolError( - "Unsupported OP_MSG payload type (%r)" % (first_payload_type,)) + raise ProtocolError(f"Unsupported OP_MSG payload type: 0x{first_payload_type:x}") if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") - # Convert Python 3 memoryview to bytes. Note we should call - # memoryview.tobytes() if we start using memoryview in Python 2.7. - payload_document = bytes(msg[5:]) + payload_document = msg[5:] return cls(flags, payload_document) -_UNPACK_REPLY = { +_UNPACK_REPLY: dict[int, Callable[[bytes], Union[_OpReply, _OpMsg]]] = { _OpReply.OP_CODE: _OpReply.unpack, _OpMsg.OP_CODE: _OpMsg.unpack, } - - -def _first_batch(sock_info, db, coll, query, ntoreturn, - slave_ok, codec_options, read_preference, cmd, listeners): - """Simple query helper for retrieving a first (and possibly only) batch.""" - query = _Query( - 0, db, coll, 0, query, None, codec_options, - read_preference, ntoreturn, 0, DEFAULT_READ_CONCERN, None, None, - None) - - name = next(iter(cmd)) - publish = listeners.enabled_for_commands - if publish: - start = datetime.datetime.now() - - request_id, msg, max_doc_size = query.get_message(slave_ok, sock_info) - - if publish: - encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start( - cmd, db, request_id, sock_info.address) - start = datetime.datetime.now() - - sock_info.send_message(msg, max_doc_size) - reply = sock_info.receive_message(request_id) - try: - docs = reply.unpack_response(None, codec_options) - except Exception as exc: - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotMasterError, OperationFailure)): - failure = exc.details - else: - failure = _convert_exception(exc) - listeners.publish_command_failure( - duration, failure, name, request_id, sock_info.address) - raise - # listIndexes - if 'cursor' in cmd: - result = { - u'cursor': { - u'firstBatch': docs, - u'id': reply.cursor_id, - u'ns': u'%s.%s' % (db, coll) - }, - u'ok': 1.0 - } - # fsyncUnlock, currentOp - else: - result = docs[0] if docs else {} - result[u'ok'] = 1.0 - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - listeners.publish_command_success( - duration, result, name, request_id, sock_info.address) - 
- return result diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index edee1afb68..089f68b77a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -26,58 +26,118 @@ >>> from pymongo import MongoClient >>> c = MongoClient() >>> c.test_database - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), u'test_database') - >>> c['test-database'] - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), u'test-database') + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ +from __future__ import annotations import contextlib -import datetime -import threading -import warnings +import os import weakref - from collections import defaultdict - -from bson.codec_options import DEFAULT_CODEC_OPTIONS -from bson.py3compat import (integer_types, - string_type) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + FrozenSet, + Generic, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +import bson +from bson.codec_options import DEFAULT_CODEC_OPTIONS, TypeRegistry from bson.son import SON -from pymongo import (common, - database, - helpers, - message, - periodic_executor, - uri_parser, - client_session) -from pymongo.change_stream import ClusterChangeStream +from bson.timestamp import Timestamp +from pymongo import ( + _csot, + client_session, + common, + database, + helpers, + message, + periodic_executor, + uri_parser, +) +from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions +from pymongo.client_session import _EmptyServerSession from pymongo.command_cursor import CommandCursor -from pymongo.cursor_manager import CursorManager -from pymongo.errors import (AutoReconnect, - BulkWriteError, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NetworkTimeout, - NotMasterError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError) -from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (writable_preferred_server_selector, - writable_server_selector) +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, +) +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks +from pymongo.pool import ConnectionClosedReason +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.topology import Topology -from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.settings import TopologySettings -from pymongo.uri_parser import (_handle_option_deprecations, - _handle_security_options, - _normalize_options) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN - - -class MongoClient(common.BaseObject): +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + 
_Pipeline, +) from pymongo.uri_parser import ( + _check_options, _handle_option_deprecations, _handle_security_options, _normalize_options, ) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + import sys + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.bulk import _Bulk + from pymongo.client_session import ClientSession, _ServerSession + from pymongo.cursor import _ConnectionManager + from pymongo.database import Database + from pymongo.message import _CursorAddress, _GetMore, _Query + from pymongo.pool import Connection + from pymongo.read_concern import ReadConcern + from pymongo.response import Response + from pymongo.server import Server + from pymongo.server_selectors import Selection + + if sys.version_info[:2] >= (3, 9): + from collections.abc import Generator + else: + # Deprecated since version 3.9: collections.abc.Generator now supports []. + from typing import Generator + +T = TypeVar("T") + +_WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] +_ReadCall = Callable[[Optional["ClientSession"], "Server", "Connection", _ServerMode], T] + + class MongoClient(common.BaseObject, Generic[_DocumentType]): """ A client-side representation of a MongoDB cluster. @@ -87,23 +147,31 @@ class MongoClient(common.BaseObject): resources related to this, including background threads for monitoring, and connection pools. """ + HOST = "localhost" PORT = 27017 # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. - _constructor_args = ('document_class', 'tz_aware', 'connect') + _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() def __init__( - self, - host=None, - port=None, - document_class=dict, - tz_aware=None, - connect=None, - type_registry=None, - **kwargs): + self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Optional[Type[_DocumentType]] = None, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: """Client for a MongoDB instance, a replica set, or a set of mongoses. + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + The client object is thread-safe and has connection-pooling built in. If an operation fails because of a network error, :class:`~pymongo.errors.ConnectionFailure` is raised and the client @@ -113,20 +181,13 @@ def __init__( The `host` parameter can be a full `mongodb URI <http://dochub.mongodb.org/core/connections>`_, in addition to - a simple hostname. It can also be a list of hostnames or - URIs. Any port specified in the host string(s) will override - the `port` parameter. If multiple mongodb URIs containing - database or auth information are passed, the last database, - username, and password present will be used. For username and + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter.
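A quick illustration of those host rules (a sketch; with the default connect=True the client connects lazily in the background, so constructing these objects does not require a running server):

from pymongo import MongoClient

# Three equivalent ways to target one host; a port embedded in the
# host string overrides the port parameter.
c1 = MongoClient("localhost", 27017)
c2 = MongoClient("mongodb://localhost:27017")
c3 = MongoClient(["localhost:27017"])  # a list of hostnames also works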
For username and passwords reserved characters like ':', '/', '+' and '@' must be percent encoded following RFC 2396:: - try: - # Python 3.x - from urllib.parse import quote_plus - except ImportError: - # Python 2.x - from urllib import quote_plus + from urllib.parse import quote_plus uri = "mongodb://%s:%s@%s" % ( quote_plus(user), quote_plus(password), host) @@ -173,8 +234,8 @@ def __init__( from pymongo.errors import ConnectionFailure client = MongoClient() try: - # The ismaster command is cheap and does not require auth. - client.admin.command('ismaster') + # The ping command is cheap and does not require auth. + client.admin.command('ping') except ConnectionFailure: print("Server not available") @@ -188,16 +249,14 @@ def __init__( :Parameters: - `host` (optional): hostname or IP address or Unix domain socket path of a single mongod or mongos instance to connect to, or a - mongodb URI, or a list of hostnames / mongodb URIs. If `host` is - an IPv6 literal it must be enclosed in '[' and ']' characters + mongodb URI, or a list of hostnames (but no more than one mongodb + URI). If `host` is an IPv6 literal it must be enclosed in '[' + and ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for localhost). Multihomed and round robin DNS addresses are **not** supported. - `port` (optional): port number on which to connect - `document_class` (optional): default class to use for documents returned from queries on this client - - `type_registry` (optional): instance of - :class:`~bson.codec_options.TypeRegistry` to enable encoding - and decoding of custom types. - `tz_aware` (optional): if ``True``, :class:`~datetime.datetime` instances returned as values in a document by this :class:`MongoClient` will be timezone @@ -205,27 +264,54 @@ def __init__( - `connect` (optional): if ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect on the first operation. + - `type_registry` (optional): instance of + :class:`~bson.codec_options.TypeRegistry` to enable encoding + and decoding of custom types. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. | **Other optional parameters can be passed as keyword arguments:** + - `directConnection` (optional): if ``True``, forces this client to + connect directly to the specified MongoDB host as a standalone. + If ``false``, the client connects to the entire replica set of + which the given MongoDB host(s) is a part. If this is ``True`` + and a mongodb+srv:// URI or a URI containing multiple seeds is + provided, an exception will be raised. - `maxPoolSize` (optional): The maximum allowable number of concurrent connections to each connected server. Requests to a server will block if there are `maxPoolSize` outstanding - connections to the requested server. Defaults to 100. Cannot be 0. + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. 
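For example, the two options just described combine naturally when pointing a client at a single member of a replica set (the hostname and values below are illustrative only):

from pymongo import MongoClient

# Treat db0 as a standalone and allow at most 50 concurrent
# connections to it; maxPoolSize=0 or None would mean "no limit".
client = MongoClient(
    "mongodb://db0.example.net:27017",
    directConnection=True,
    maxPoolSize=50,
)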
- `minPoolSize` (optional): The minimum required number of concurrent connections that the pool will maintain to each connected server. Default is 0. - `maxIdleTimeMS` (optional): The maximum number of milliseconds that a connection can remain idle in the pool before being removed and replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. - `socketTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait for a response after sending an ordinary (non-monitoring) database operation before concluding that - a network error has occurred. Defaults to ``None`` (no timeout). + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). - `connectTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait during server monitoring when connecting a new socket to a server before concluding the server - is unavailable. Defaults to ``20000`` (20 seconds). + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). - `server_selector`: (callable or None) Optional, user-provided function that augments server selection rules. The function should accept as an argument a list of @@ -241,17 +327,15 @@ def __init__( - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) a thread will wait for a socket from the pool if the pool has no free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer or None) Multiplied by maxPoolSize - to give the number of threads allowed to wait for a socket at one - time. Defaults to ``None`` (no limit). - `heartbeatFrequencyMS`: (optional) The number of milliseconds between periodic server checks, or None to accept the default frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". - `appname`: (string or None) The name of the application that - created this MongoClient instance. MongoDB 3.4 and newer will - print this value in the server log upon establishing each - connection. It is also recorded in the slow query log and - profile collections. + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. - `driver`: (pair or None) A driver implemented on top of PyMongo can pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, version, and platform to the message printed in the server log when @@ -260,7 +344,7 @@ def __init__( :mod:`~pymongo.monitoring` for details. - `retryWrites`: (boolean) Whether supported write operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. 
The supported write operations are: - :meth:`~pymongo.collection.Collection.bulk_write`, as long as @@ -282,7 +366,7 @@ def __init__( https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst - `retryReads`: (boolean) Whether supported read operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. The supported read operations are: :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, @@ -298,11 +382,9 @@ def __init__( :meth:`pymongo.mongo_client.MongoClient.watch`, and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. - Unsupported read operations include, but are not limited to: - :meth:`~pymongo.collection.Collection.map_reduce`, - :meth:`~pymongo.collection.Collection.inline_map_reduce`, - :meth:`~pymongo.database.Database.command`, - and any getMore operation on a cursor. + Unsupported read operations include, but are not limited to + :meth:`~pymongo.database.Database.command` and any getMore + operation on a cursor. Enabling retryable reads makes applications more resilient to transient errors such as network failures, database upgrades, and @@ -310,10 +392,6 @@ def __init__( trigger a retry, see the `retryable reads specification <https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst>`_. - - `socketKeepAlive`: (boolean) **DEPRECATED** Whether to send - periodic keep-alive packets on connected sockets. Defaults to - ``True``. Disabling it is not recommended, see - https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments", - `compressors`: Comma separated list of compressors for wire protocol compression. The list is used to negotiate a compressor with the server. Currently supported options are "snappy", "zlib" @@ -322,9 +400,9 @@ def __init__( zlib support requires the Python standard library zlib module. zstd requires the `zstandard <https://pypi.org/project/zstandard/>`_ package. By default no compression is used. Compression support - must also be enabled on the server. MongoDB 3.4+ supports snappy - compression. MongoDB 3.6 adds support for zlib. MongoDB 4.2 adds - support for zstd. + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. + See :ref:`network-compression-example` for details. - `zlibCompressionLevel`: (int) The zlib compression level to use when zlib is used as the wire protocol compressor. Supported values are -1 through 9. -1 tells the zlib library to use its default @@ -332,9 +410,24 @@ def __init__( speed. 9 is best compression. Defaults to -1. - `uuidRepresentation`: The BSON representation to use when encoding from and decoding to instances of :class:`~uuid.UUID`. Valid - values are `pythonLegacy` (the default), `javaLegacy`, - `csharpLegacy` and `standard`. New applications should consider - setting this to `standard` for cross language compatibility. + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'.
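Putting several of the options above together in one client (a sketch; the values are illustrative, and the compressors line assumes the zstandard package is installed so zstd can actually be negotiated):

from pymongo import MongoClient

client = MongoClient(
    "mongodb://localhost:27017",
    timeoutMS=10000,          # per-operation budget, including retry attempts
    retryWrites=True,         # the default, shown here for clarity
    retryReads=True,
    compressors="zstd,zlib",  # negotiated with the server in this order
    zlibCompressionLevel=6,
)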
+ - `srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. + | **Write Concern options:** | (Only set if passed. No default values.) @@ -352,10 +445,8 @@ def __init__( will cause **write operations to wait indefinitely**. - `journal`: If ``True`` block until write operations have been committed to the journal. Cannot be used in combination with - `fsync`. Prior to MongoDB 2.6 this option was ignored if the server - was running without journaling. Starting with MongoDB 2.6 write - operations will fail with an exception if this option is used when - the server is running without journaling. + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. - `fsync`: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` @@ -403,15 +494,15 @@ def __init__( - `authSource`: The database to authenticate on. Defaults to the database specified in the URI, if provided, or to "admin". - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically uses MONGODB-CR - when connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when - connected to MongoDB 3.0 through 3.6, and negotiates the mechanism - to use (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB - 4.0+. + If no mechanism is specified, PyMongo automatically uses SCRAM-SHA-1 + when connected to MongoDB 3.6 and negotiates the mechanism to use + (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. - `authMechanismProperties`: Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass authMechanismProperties='SERVICE_NAME:<service + name>'. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:<session token>'``. .. seealso:: :doc:`/examples/authentication` @@ -435,36 +526,30 @@ def __init__( ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. Defaults to ``False``. Think very carefully before setting this to ``True`` as that could make your application vulnerable to - man-in-the-middle attacks. + on-path attackers. - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS hostname verification. ``tlsAllowInvalidHostnames=False`` implies ``tls=True``. Defaults to ``False``. Think very carefully before setting this to ``True`` as that could make your application - vulnerable to man-in-the-middle attacks. + vulnerable to on-path attackers. - `tlsCAFile`: A file containing a single or a bundle of "certification authority" certificates, which are used to validate certificates passed from the other end of the connection. Implies ``tls=True``. Defaults to ``None``. - `tlsCertificateKeyFile`: A file containing the client certificate - and private key. If you want to pass the certificate and private - key as separate files, use the ``ssl_certfile`` and ``ssl_keyfile`` - options instead. Implies ``tls=True``. Defaults to ``None``. + and private key. Implies ``tls=True``.
Defaults to ``None``. - `tlsCRLFile`: A file containing a PEM or DER formatted - certificate revocation list. Only supported by python 2.7.9+ - (pypy 2.5.1+). Implies ``tls=True``. Defaults to ``None``. + certificate revocation list. Implies ``tls=True``. Defaults to + ``None``. - `tlsCertificateKeyFilePassword`: The password or passphrase for - decrypting the private key in ``tlsCertificateKeyFile`` or - ``ssl_keyfile``. Only necessary if the private key is encrypted. - Only supported by python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults - to ``None``. + decrypting the private key in ``tlsCertificateKeyFile``. Only + necessary if the private key is encrypted. Defaults to ``None``. + - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables + certificate revocation status checking via the OCSP responder + specified on the server certificate. + ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``. + Defaults to ``False``. - `ssl`: (boolean) Alias for ``tls``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``tls=True``. Defaults to - ``None``. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. Can be omitted if the keyfile is - included with the ``tlsCertificateKeyFile``. Implies ``tls=True``. - Defaults to ``None``. | **Read Concern options:** | (If not set explicitly, this will use the server default) @@ -483,8 +568,56 @@ def __init__( configures this client to automatically encrypt collection commands and automatically decrypt results. See :ref:`automatic-client-side-encryption` for an example. + If a :class:`MongoClient` is configured with + ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a + separate internal ``MongoClient`` is created if any of the + following are true: + + - A ``key_vault_client`` is not passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` + - ``bypass_auto_encryption=False`` is passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` + + | **Stable API options:** + | (If not set explicitly, Stable API will not be enabled.) + + - `server_api`: A + :class:`~pymongo.server_api.ServerApi` which configures this + client to use Stable API. See :ref:`versioned-api-ref` for + details. - .. mongodoc:: connections + .. seealso:: The MongoDB documentation on `connections <https://dochub.mongodb.org/core/connections>`_. + + .. versionchanged:: 4.5 + Added the ``serverMonitoringMode`` keyword argument. + + .. versionchanged:: 4.2 + Added the ``timeoutMS`` keyword argument. + + .. versionchanged:: 4.0 + + - Removed the fsync, unlock, is_locked, database_names, and + close_cursor methods. + See the :ref:`pymongo4-migration-guide`. + - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` + keyword arguments. + - The default for `uuidRepresentation` was changed from + ``pythonLegacy`` to ``unspecified``. + - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and + keyword arguments. + + .. versionchanged:: 3.12 + Added the ``server_api`` keyword argument. + The following keyword arguments were deprecated: + + - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor + of ``tlsCertificateKeyFile``. + + .. versionchanged:: 3.11 + Added the following keyword arguments and URI options: + + - ``tlsDisableOCSPEndpointCheck`` + - ``directConnection`` .. versionchanged:: 3.9 Added the ``retryReads`` keyword argument and URI option. @@ -520,7 +653,7 @@ def __init__( .. versionchanged:: 3.5 Add ``username`` and ``password`` options.
Document the - ``authSource``, ``authMechanism``, and ``authMechanismProperties `` + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` options. Deprecated the ``socketKeepAlive`` keyword argument and URI option. ``socketKeepAlive`` now defaults to ``True``. @@ -584,9 +717,20 @@ def __init__( client.__my_database__ """ + doc_class = document_class or dict + self.__init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + if host is None: host = self.HOST - if isinstance(host, string_type): + if isinstance(host, str): host = [host] if port is None: port = self.PORT @@ -595,30 +739,45 @@ def __init__( # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. - pool_class = kwargs.pop('_pool_class', None) - monitor_class = kwargs.pop('_monitor_class', None) - condition_class = kwargs.pop('_condition_class', None) + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts['document_class'] = document_class + keyword_opts["document_class"] = doc_class seeds = set() username = None password = None dbase = None - opts = {} + opts = common._CaseInsensitiveDictionary() fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") for entity in host: - if "://" in entity: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: # Determine connection timeout from kwargs. timeout = keyword_opts.get("connecttimeoutms") if timeout is not None: - timeout = common.validate_timeout_or_none( - keyword_opts.cased_key("connecttimeoutms"), timeout) + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) res = uri_parser.parse_uri( - entity, port, validate=True, warn=True, normalize=False, - connect_timeout=timeout) + entity, + port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -632,60 +791,50 @@ def __init__( # Add options with named keyword arguments to the parsed kwarg options. if type_registry is not None: - keyword_opts['type_registry'] = type_registry + keyword_opts["type_registry"] = type_registry if tz_aware is None: - tz_aware = opts.get('tz_aware', False) + tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get('connect', True) - keyword_opts['tz_aware'] = tz_aware - keyword_opts['connect'] = connect + connect = opts.get("connect", True) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect # Handle deprecated options in kwarg options. keyword_opts = _handle_option_deprecations(keyword_opts) # Validate kwarg options. 
keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(k, v) for k, v in keyword_opts.items())) + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) # Override connection string options with kwarg options. opts.update(keyword_opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") # Handle security-option conflicts in combined options. opts = _handle_security_options(opts) # Normalize combined options. opts = _normalize_options(opts) + _check_options(seeds, opts) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - if 'socketkeepalive' in opts: - warnings.warn( - "The socketKeepAlive option is deprecated. It now" - "defaults to true and disabling it is not recommended, see " - "https://docs.mongodb.com/manual/faq/diagnostics/" - "#does-tcp-keepalive-time-affect-mongodb-deployments", - DeprecationWarning, stacklevel=2) - self.__options = options = ClientOptions( - username, password, dbase, opts) + self.__options = options = ClientOptions(username, password, dbase, opts) self.__default_database_name = dbase - self.__lock = threading.Lock() - self.__cursor_manager = None - self.__kill_cursors_queue = [] - - self._event_listeners = options.pool_options.event_listeners - - # Cache of existing indexes used by ensure_index ops. - self.__index_cache = {} - self.__index_cache_lock = threading.Lock() - - super(MongoClient, self).__init__(options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern) + self.__lock = _create_lock() + self.__kill_cursors_queue: list = [] - self.__all_credentials = {} - creds = options.credentials - if creds: - self._cache_credentials(creds.source, creds) + self._event_listeners = options.pool_options._event_listeners + super().__init__( + options.codec_options, + options.read_preference, + options.write_concern, + options.read_concern, + ) self._topology_settings = TopologySettings( seeds=seeds, @@ -698,13 +847,35 @@ def __init__( server_selection_timeout=options.server_selection_timeout, server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn) + fqdn=fqdn, + direct_connection=options.direct_connection, + load_balanced=options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=options.server_monitoring_mode, + ) + + self._init_background() - self._topology = Topology(self._topology_settings) if connect: - self._topology.open() + self._get_topology() + + self._encrypter = None + if self.__options.auto_encryption_opts: + from pymongo.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) + self._timeout = self.__options.timeout + + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + + def _init_background(self) -> None: + self._topology = Topology(self._topology_settings) - def target(): + def target() -> bool: client = self_ref() if client is None: return False # Stop the executor. 
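Since the merge above applies keyword options over the parsed URI options (``opts.update(keyword_opts)``), a keyword argument wins when the same setting appears in both places. A minimal sketch of that precedence; no server is contacted:

```python
from pymongo import MongoClient

client = MongoClient(
    "mongodb://localhost:27017/?connectTimeoutMS=5000",
    connectTimeoutMS=1000,  # keyword argument overrides the URI's 5000
    connect=False,
)
# PoolOptions stores the timeout in seconds; ``options`` is the public
# accessor added in 4.0.
assert client.options.pool_options.connect_timeout == 1.0
```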
@@ -713,109 +884,26 @@ def target(): executor = periodic_executor.PeriodicExecutor( interval=common.KILL_CURSOR_FREQUENCY, - min_interval=0.5, + min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_kill_cursors_thread") + name="pymongo_kill_cursors_thread", + ) # We strongly reference the executor and it weakly references us via # this closure. When the client is freed, stop the executor soon. - self_ref = weakref.ref(self, executor.close) + self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor - executor.open() - - self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter - self._encrypter = _Encrypter.create( - self, self.__options.auto_encryption_opts) - - def _cache_credentials(self, source, credentials, connect=False): - """Save a set of authentication credentials. - - The credentials are used to login a socket whenever one is created. - If `connect` is True, verify the credentials on the server first. - """ - # Don't let other threads affect this call's data. - all_credentials = self.__all_credentials.copy() - - if source in all_credentials: - # Nothing to do if we already have these credentials. - if credentials == all_credentials[source]: - return - raise OperationFailure('Another user is already authenticated ' - 'to this database. You must logout first.') - - if connect: - server = self._get_topology().select_server( - writable_preferred_server_selector) - - # get_socket() logs out of the database if logged in with old - # credentials, and logs in with new ones. - with server.get_socket(all_credentials) as sock_info: - sock_info.authenticate(credentials) - - # If several threads run _cache_credentials at once, last one wins. - self.__all_credentials[source] = credentials - - def _purge_credentials(self, source): - """Purge credentials from the authentication cache.""" - self.__all_credentials.pop(source, None) - - def _cached(self, dbname, coll, index): - """Test if `index` is cached.""" - cache = self.__index_cache - now = datetime.datetime.utcnow() - with self.__index_cache_lock: - return (dbname in cache and - coll in cache[dbname] and - index in cache[dbname][coll] and - now < cache[dbname][coll][index]) - - def _cache_index(self, dbname, collection, index, cache_for): - """Add an index to the index cache for ensure_index operations.""" - now = datetime.datetime.utcnow() - expire = datetime.timedelta(seconds=cache_for) + now - - with self.__index_cache_lock: - if dbname not in self.__index_cache: - self.__index_cache[dbname] = {} - self.__index_cache[dbname][collection] = {} - self.__index_cache[dbname][collection][index] = expire - - elif collection not in self.__index_cache[dbname]: - self.__index_cache[dbname][collection] = {} - self.__index_cache[dbname][collection][index] = expire - - else: - self.__index_cache[dbname][collection][index] = expire - - def _purge_index(self, database_name, - collection_name=None, index_name=None): - """Purge an index from the index cache. - - If `index_name` is None purge an entire collection. - If `collection_name` is None purge an entire database. 
- """ - with self.__index_cache_lock: - if not database_name in self.__index_cache: - return - - if collection_name is None: - del self.__index_cache[database_name] - return - - if not collection_name in self.__index_cache[database_name]: - return - - if index_name is None: - del self.__index_cache[database_name][collection_name] - return + def _after_fork(self) -> None: + """Resets topology in a child after successfully forking.""" + self._init_background() - if index_name in self.__index_cache[database_name][collection_name]: - del self.__index_cache[database_name][collection_name][index_name] + def _duplicate(self, **kwargs: Any) -> MongoClient: + args = self.__init_kwargs.copy() + args.update(kwargs) + return MongoClient(**args) - def _server_property(self, attr_name): + def _server_property(self, attr_name: str) -> Any: """An attribute of the current server's description. If the client is not connected, this will block until a connection is @@ -826,14 +914,25 @@ def _server_property(self, attr_name): the server may change. In such cases, store a local reference to a ServerDescription first, then use its properties. """ - server = self._topology.select_server( - writable_server_selector) + server = self._topology.select_server(writable_server_selector) return getattr(server.description, attr_name) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. Performs an aggregation with an implicit initial ``$changeStream`` @@ -861,14 +960,13 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. code-block:: python try: - with client.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. @@ -879,11 +977,15 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. 
When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -904,35 +1006,72 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. .. versionadded:: 3.7 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ return ClusterChangeStream( - self.admin, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) + self.admin, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) @property - def event_listeners(self): - """The event listeners registered for this client. + def topology_description(self) -> TopologyDescription: + """The description of the connected MongoDB deployment. + + >>> client.topology_description + <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ...>, <ServerDescription ...>, <ServerDescription ...>]> + >>> client.topology_description.topology_type_name + 'ReplicaSetWithPrimary' + + Note that the description is periodically updated in the background + but the returned object itself is immutable. Access this property again + to get a more recent + :class:`~pymongo.topology_description.TopologyDescription`. + + :Returns: + An instance of + :class:`~pymongo.topology_description.TopologyDescription`. - See :mod:`~pymongo.monitoring` for details. + .. versionadded:: 4.0 """ - return self._event_listeners.event_listeners + return self._topology.description @property - def address(self): + def address(self) -> Optional[tuple[str, int]]: """(host, port) of the current standalone, primary, or mongos, or None. Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if @@ -946,17 +1085,25 @@ def address(self): ..
versionadded:: 3.0 """ topology_type = self._topology._description.topology_type - if topology_type == TOPOLOGY_TYPE.Sharded: + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): raise InvalidOperation( 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.') - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single): + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): return None - return self._server_property('address') + return self._server_property("address") @property - def primary(self): + def primary(self) -> Optional[tuple[str, int]]: """The (host, port) of the current primary of the replica set. Returns ``None`` if this client is not connected to a replica set, @@ -964,13 +1111,12 @@ def primary(self): `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0 when - MongoReplicaSetClient's functionality was merged in. + MongoClient gained this property in version 3.0. """ - return self._topology.get_primary() + return self._topology.get_primary() # type: ignore[return-value] @property - def secondaries(self): + def secondaries(self) -> set[_Address]: """The secondary members known to this client. A sequence of (host, port) pairs. Empty if this client is not @@ -978,13 +1124,12 @@ def secondaries(self): client was created without the `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0 when - MongoReplicaSetClient's functionality was merged in. + MongoClient gained this property in version 3.0. """ return self._topology.get_secondaries() @property - def arbiters(self): + def arbiters(self) -> set[_Address]: """Arbiters in the replica set. A sequence of (host, port) pairs. Empty if this client is not @@ -994,7 +1139,7 @@ def arbiters(self): return self._topology.get_arbiters() @property - def is_primary(self): + def is_primary(self) -> bool: """If this client is connected to a server that can accept writes. True if the current server is a standalone, mongos, or the primary of @@ -1002,51 +1147,18 @@ def is_primary(self): connection is established or raise ServerSelectionTimeoutError if no server is available. """ - return self._server_property('is_writable') + return self._server_property("is_writable") @property - def is_mongos(self): + def is_mongos(self) -> bool: """If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available.. - """ - return self._server_property('server_type') == SERVER_TYPE.Mongos - - @property - def max_pool_size(self): - """The maximum allowable number of concurrent connections to each - connected server. Requests to a server will block if there are - `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Cannot be 0. - - When a server's pool has reached `max_pool_size`, operations for that - server block waiting for a socket to be returned to the pool. If - ``waitQueueTimeoutMS`` is set, a blocked operation will raise - :exc:`~pymongo.errors.ConnectionFailure` after a timeout. - By default ``waitQueueTimeoutMS`` is not set. 
- """ - return self.__options.pool_options.max_pool_size - - @property - def min_pool_size(self): - """The minimum required number of concurrent connections that the pool - will maintain to each connected server. Default is 0. + ServerSelectionTimeoutError if no server is available. """ - return self.__options.pool_options.min_pool_size - - @property - def max_idle_time_ms(self): - """The maximum number of milliseconds that a connection can remain - idle in the pool before being removed and replaced. Defaults to - `None` (no limit). - """ - seconds = self.__options.pool_options.max_idle_time_seconds - if seconds is None: - return None - return 1000 * seconds + return self._server_property("server_type") == SERVER_TYPE.Mongos @property - def nodes(self): + def nodes(self) -> FrozenSet[_Address]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` @@ -1060,104 +1172,47 @@ def nodes(self): return frozenset(s.address for s in description.known_servers) @property - def max_bson_size(self): - """The largest BSON object the connected server accepts in bytes. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - """ - return self._server_property('max_bson_size') - - @property - def max_message_size(self): - """The largest message the connected server accepts in bytes. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - """ - return self._server_property('max_message_size') - - @property - def max_write_batch_size(self): - """The maxWriteBatchSize reported by the server. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - Returns a default value when connected to server versions prior to - MongoDB 2.6. - """ - return self._server_property('max_write_batch_size') - - @property - def local_threshold_ms(self): - """The local threshold for this instance.""" - return self.__options.local_threshold_ms - - @property - def server_selection_timeout(self): - """The server selection timeout for this instance in seconds.""" - return self.__options.server_selection_timeout - - @property - def retry_writes(self): - """If this instance should retry supported write operations.""" - return self.__options.retry_writes + def options(self) -> ClientOptions: + """The configuration options for this client. - @property - def retry_reads(self): - """If this instance should retry supported write operations.""" - return self.__options.retry_reads + :Returns: + An instance of :class:`~pymongo.client_options.ClientOptions`. - def _is_writable(self): - """Attempt to connect to a writable server, or return False. + .. versionadded:: 4.0 """ - topology = self._get_topology() # Starts monitors if necessary. - try: - svr = topology.select_server(writable_server_selector) - - # When directly connected to a secondary, arbiter, etc., - # select_server returns it, whatever the selector. Check - # again if the server is writable. 
- return svr.description.is_writable - except ConnectionFailure: - return False + return self.__options - def _end_sessions(self, session_ids): + def _end_sessions(self, session_ids: list[_ServerSession]) -> None: """Send endSessions command(s) with the given session ids.""" try: - # Use SocketInfo.command directly to avoid implicitly creating + # Use Connection.command directly to avoid implicitly creating # another session. - with self._socket_for_reads( - ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, slave_ok): - if not sock_info.supports_sessions: + with self._conn_for_reads(ReadPreference.PRIMARY_PREFERRED, None) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: return for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = SON([('endSessions', - session_ids[i:i + common._MAX_END_SESSIONS])]) - sock_info.command( - 'admin', spec, slave_ok=slave_ok, client=self) + spec = SON([("endSessions", session_ids[i : i + common._MAX_END_SESSIONS])]) + conn.command("admin", spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. pass - def close(self): + def close(self) -> None: """Cleanup client resources and disconnect from MongoDB. - On MongoDB >= 3.6, end all server sessions created by this client by - sending one or more endSessions commands. + End all server sessions created by this client by sending one or more + endSessions commands. Close all sockets in the connection pools and stop the monitor threads. - If this instance is used again it will be automatically re-opened and - the threads restarted unless auto encryption is enabled. A client - enabled with auto encryption cannot be used again after being closed; - any attempt will raise :exc:`~.errors.InvalidOperation`. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. .. versionchanged:: 3.6 End all server sessions created by this client. @@ -1174,36 +1229,7 @@ def close(self): # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. self._encrypter.close() - def set_cursor_manager(self, manager_class): - """DEPRECATED - Set this client's cursor manager. - - Raises :class:`TypeError` if `manager_class` is not a subclass of - :class:`~pymongo.cursor_manager.CursorManager`. A cursor manager - handles closing cursors. Different managers can implement different - policies in terms of when to actually kill a cursor that has - been closed. - - :Parameters: - - `manager_class`: cursor manager to use - - .. versionchanged:: 3.3 - Deprecated, for real this time. - - .. versionchanged:: 3.0 - Undeprecated. - """ - warnings.warn( - "set_cursor_manager is Deprecated", - DeprecationWarning, - stacklevel=2) - manager = manager_class(self) - if not isinstance(manager, CursorManager): - raise TypeError("manager_class must be a subclass of " - "CursorManager") - - self.__cursor_manager = manager - - def _get_topology(self): + def _get_topology(self) -> Topology: """Get the internal :class:`~pymongo.topology.Topology` object. 
If this client was created with "connect=False", calling _get_topology @@ -1215,21 +1241,43 @@ def _get_topology(self): return self._topology @contextlib.contextmanager - def _get_socket(self, server, session, exhaust=False): - with _MongoClientErrorHandler( - self, server.description.address, session) as err_handler: - with server.get_socket( - self.__all_credentials, checkout=exhaust) as sock_info: - err_handler.contribute_socket(sock_info) - if (self._encrypter and - not self._encrypter._bypass_auto_encryption and - sock_info.max_wire_version < 8): + def _checkout(self, server: Server, session: Optional[ClientSession]) -> Iterator[Connection]: + in_txn = session and session.in_transaction + with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + with server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): raise ConfigurationError( - 'Auto-encryption requires a minimum MongoDB version ' - 'of 4.2') - yield sock_info - - def _select_server(self, server_selector, session, address=None): + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + address: Optional[_Address] = None, + ) -> Server: """Select a server to run an operation on this client. :Parameters: @@ -1242,304 +1290,286 @@ def _select_server(self, server_selector, session, address=None): """ try: topology = self._get_topology() - address = address or (session and session._pinned_address) + if session and not session.in_transaction: + session._transaction.reset() + if not address and session: + address = session._pinned_address if address: # We're running a getMore or this session is pinned to a mongos. server = topology.select_server_by_address(address) if not server: - raise AutoReconnect('server %s:%d no longer available' - % address) + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 else: server = topology.select_server(server_selector) - # Pin this session to the selected server if it's performing a - # sharded transaction. - if server.description.mongos and (session and - session.in_transaction): - session._pin_mongos(server) return server except PyMongoError as exc: - if session and exc.has_error_label("TransientTransactionError"): - session._unpin_mongos() + # Server selection errors in a transaction are transient. 
+ if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + session._unpin() raise - def _socket_for_writes(self, session): + def _conn_for_writes(self, session: Optional[ClientSession]) -> ContextManager[Connection]: server = self._select_server(writable_server_selector, session) - return self._get_socket(server, session) + return self._checkout(server, session) @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, - exhaust=False): + def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] + ) -> Iterator[tuple[Connection, _ServerMode]]: assert read_preference is not None, "read_preference must not be None" - # Get a socket for a server matching the read preference, and yield - # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to - # mongods with topology type Single. If the server type is Mongos, - # follow the rules for passing read preference to mongos, even for - # topology type Single." + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. # Thread safe: if the type is single it cannot change. topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single - with self._get_socket(server, session, exhaust=exhaust) as sock_info: - slave_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) - yield sock_info, slave_ok - - @contextlib.contextmanager - def _socket_for_reads(self, read_preference, session): + with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + def _conn_for_reads( + self, read_preference: _ServerMode, session: Optional[ClientSession] + ) -> ContextManager[tuple[Connection, _ServerMode]]: assert read_preference is not None, "read_preference must not be None" - # Get a socket for a server matching the read preference, and yield - # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to - # mongods with topology type Single. If the server type is Mongos, - # follow the rules for passing read preference to mongos, even for - # topology type Single." - # Thread safe: if the type is single it cannot change. 
- topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single + _ = self._get_topology() server = self._select_server(read_preference, session) - - with self._get_socket(server, session) as sock_info: - slave_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) - yield sock_info, slave_ok - - def _run_operation_with_response(self, operation, unpack_res, - exhaust=False, address=None): + return self._conn_from_server(read_preference, server, session) + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self.__options.load_balanced and not (session and session.in_transaction) + + @_csot.apply + def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, + address: Optional[_Address] = None, + ) -> Response: """Run a _Query/_GetMore operation and return a Response. :Parameters: - `operation`: a _Query or _GetMore object. - `unpack_res`: A callable that decodes the wire protocol response. - - `exhaust` (optional): If True, the socket used stays checked out. - It is returned along with its Pool in the Response. - `address` (optional): Optional address when sending a message to a specific server, used for getMore. """ - if operation.exhaust_mgr: + if operation.conn_mgr: server = self._select_server( - operation.read_preference, operation.session, address=address) - - with _MongoClientErrorHandler( - self, server.description.address, - operation.session) as err_handler: - err_handler.contribute_socket(operation.exhaust_mgr.sock) - return server.run_operation_with_response( - operation.exhaust_mgr.sock, - operation, - True, - self._event_listeners, - exhaust, - unpack_res) - - def _cmd(session, server, sock_info, slave_ok): - return server.run_operation_with_response( - sock_info, - operation, - slave_ok, - self._event_listeners, - exhaust, - unpack_res) + operation.read_preference, operation.session, address=address + ) + + with operation.conn_mgr.lock: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: + err_handler.contribute_socket(operation.conn_mgr.conn) + return server.run_operation( + operation.conn_mgr.conn, + operation, + operation.read_preference, + self._event_listeners, + unpack_res, + ) + + def _cmd( + _session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> Response: + operation.reset() # Reset op in case of retry. + return server.run_operation( + conn, operation, read_preference, self._event_listeners, unpack_res + ) return self._retryable_read( - _cmd, operation.read_preference, operation.session, + _cmd, + operation.read_preference, + operation.session, address=address, retryable=isinstance(operation, message._Query), - exhaust=exhaust) - - def _retry_with_session(self, retryable, func, session, bulk): + ) + + def _retry_with_session( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + ) -> T: """Execute an operation with at most one consecutive retries Returns func()'s return value on success. On error retries the same - command once. + command. Re-raises any exception thrown by func(). 
""" - retryable = (retryable and self.retry_writes - and session and not session.in_transaction) - last_error = None - retrying = False - - def is_retrying(): - return bulk.retrying if bulk else retrying - # Increment the transaction id up front to ensure any retry attempt - # will use the proper txnNumber, even if server or socket selection - # fails before the command can be sent. - if retryable: - session._start_retryable_write() - if bulk: - bulk.started_retryable_write = True + # Ensure that the options supports retry_writes and there is a valid session not in + # transaction, otherwise, we will not support retry behavior for this txn. + retryable = bool( + retryable and self.options.retry_writes and session and not session.in_transaction + ) + return self._retry_internal( + func=func, + session=session, + bulk=bulk, + retryable=retryable, + ) + + @_csot.apply + def _retry_internal( + self, + func: _WriteCall[T] | _ReadCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + is_read: bool = False, + address: Optional[_Address] = None, + read_pref: Optional[_ServerMode] = None, + retryable: bool = False, + ) -> T: + """Internal retryable helper for all client transactions. - while True: - try: - server = self._select_server(writable_server_selector, session) - supports_session = ( - session is not None and - server.description.retryable_writes_supported) - with self._get_socket(server, session) as sock_info: - if retryable and not supports_session: - if is_retrying(): - # A retry is not possible because this server does - # not support sessions raise the last error. - raise last_error - retryable = False - return func(session, sock_info, retryable) - except ServerSelectionTimeoutError: - if is_retrying(): - # The application may think the write was never attempted - # if we raise ServerSelectionTimeoutError on the retry - # attempt. Raise the original exception instead. - raise last_error - # A ServerSelectionTimeoutError error indicates that there may - # be a persistent outage. Attempting to retry in this case will - # most likely be a waste of time. - raise - except ConnectionFailure as exc: - if not retryable or is_retrying(): - raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc - except BulkWriteError as exc: - if not retryable or is_retrying(): - raise - # Check the last writeConcernError to determine if this - # BulkWriteError is retryable. - wces = exc.details['writeConcernErrors'] - wce = wces[-1] if wces else {} - if wce.get('code', 0) not in helpers._RETRYABLE_ERROR_CODES: - raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc - except OperationFailure as exc: - # retryWrites on MMAPv1 should raise an actionable error. - if (exc.code == 20 and - str(exc).startswith("Transaction numbers")): - errmsg = ( - "This MongoDB deployment does not support " - "retryable writes. 
Please add retryWrites=false " - "to your connection string.") - raise OperationFailure(errmsg, exc.code, exc.details) - if not retryable or is_retrying(): - raise - if exc.code not in helpers._RETRYABLE_ERROR_CODES: - raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc + :Parameters: + - `func`: Callback function we want to retry + - `session`: Client Session on which the transaction should occur + - `bulk`: Abstraction to handle bulk write operations + - `is_read`: If this is an exclusive read transaction, defaults to False + - `address`: Server Address, defaults to None + - `read_pref`: Topology of read operation, defaults to None + - `retryable`: If the operation should be retried once, defaults to False - def _retryable_read(self, func, read_pref, session, address=None, - retryable=True, exhaust=False): - """Execute an operation with at most one consecutive retries + :Returns: + Output of the calling func() + """ + return _ClientConnectionRetryable( + mongo_client=self, + func=func, + bulk=bulk, + is_read=is_read, + session=session, + read_pref=read_pref, + address=address, + retryable=retryable, + ).run() + + def _retryable_read( + self, + func: _ReadCall[T], + read_pref: _ServerMode, + session: Optional[ClientSession], + address: Optional[_Address] = None, + retryable: bool = True, + ) -> T: + """Execute an operation with consecutive retries if possible Returns func()'s return value on success. On error retries the same - command once. + command. + + Re-raises any exception thrown by func(). + + - `func`: Read call we want to execute + - `read_pref`: Desired topology of read operation + - `session`: Client session we should use to execute operation + - `address`: Optional address when sending a message, defaults to None + - `retryable`: if we should attempt retries + (may not always be supported even if supplied), defaults to True """ - retryable = (retryable and - self.retry_reads - and not (session and session.in_transaction)) - last_error = None - retrying = False - while True: - try: - server = self._select_server( - read_pref, session, address=address) - if not server.description.retryable_reads_supported: - retryable = False - with self._slaveok_for_server(read_pref, server, session, - exhaust=exhaust) as (sock_info, - slave_ok): - if retrying and not retryable: - # A retry is not possible because this server does - # not support retryable reads, raise the last error. - raise last_error - return func(session, server, sock_info, slave_ok) - except ServerSelectionTimeoutError: - if retrying: - # The application may think the write was never attempted - # if we raise ServerSelectionTimeoutError on the retry - # attempt. Raise the original exception instead. - raise last_error - # A ServerSelectionTimeoutError error indicates that there may - # be a persistent outage. Attempting to retry in this case will - # most likely be a waste of time. - raise - except ConnectionFailure as exc: - if not retryable or retrying: - raise - retrying = True - last_error = exc - except OperationFailure as exc: - if not retryable or retrying: - raise - if exc.code not in helpers._RETRYABLE_ERROR_CODES: - raise - retrying = True - last_error = exc + # Ensure that the client supports retrying on reads and there is no session in + # transaction, otherwise, we will not support retry behavior for this call.
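Both retry gates above reduce to client options plus session state, so the user-visible switches are just the ``retryWrites``/``retryReads`` options. A minimal sketch (no server needed):

```python
from pymongo import MongoClient

client = MongoClient(
    "mongodb://localhost:27017",
    retryWrites=True,  # the default; feeds the _retry_with_session gate
    retryReads=True,   # the default; feeds the _retryable_read gate
    connect=False,
)
assert client.options.retry_writes and client.options.retry_reads
```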
+ retryable = bool( + retryable and self.options.retry_reads and not (session and session.in_transaction) + ) + return self._retry_internal( + func, + session, + None, + is_read=True, + address=address, + read_pref=read_pref, + retryable=retryable, + ) + + def _retryable_write( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk] = None, + ) -> T: + """Execute an operation with consecutive retries if possible - def _retryable_write(self, retryable, func, session): - """Internal retryable write helper.""" - with self._tmp_session(session) as s: - return self._retry_with_session(retryable, func, s, None) + Returns func()'s return value on success. On error retries the same + command. - def _reset_server(self, address): - """Clear our connection pool for a server and mark it Unknown.""" - self._topology.reset_server(address) + Re-raises any exception thrown by func(). - def _reset_server_and_request_check(self, address): - """Clear our pool for a server, mark it Unknown, and check it soon.""" - self._topology.reset_server_and_request_check(address) + :Parameters: + - `retryable`: if we should attempt retries (may not always be supported) + - `func`: write call we want to execute during a session + - `session`: Client session we will use to execute write operation + - `bulk`: bulk abstraction to execute operations in bulk, defaults to None + """ + with self._tmp_session(session) as s: + return self._retry_with_session(retryable, func, s, bulk) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): - return self.address == other.address + return self._topology == other._topology return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def _repr_helper(self): - def option_repr(option, value): + def __hash__(self) -> int: + return hash(self._topology) + + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: """Fix options whose __repr__ isn't usable in a constructor.""" - if option == 'document_class': + if option == "document_class": if value is dict: - return 'document_class=dict' + return "document_class=dict" else: - return 'document_class=%s.%s' % (value.__module__, - value.__name__) + return f"document_class={value.__module__}.{value.__name__}" if option in common.TIMEOUT_OPTIONS and value is not None: - return "%s=%s" % (option, int(value * 1000)) + return f"{option}={int(value * 1000)}" - return '%s=%r' % (option, value) + return f"{option}={value!r}" # Host first... - options = ['host=%r' % [ - '%s:%d' % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds]] + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] # ... then everything in self._constructor_args... options.extend( - option_repr(key, self.__options._options[key]) - for key in self._constructor_args) + option_repr(key, self.__options._options[key]) for key in self._constructor_args + ) # ... then everything else. 
options.extend( option_repr(key, self.__options._options[key]) for key in self.__options._options - if key not in set(self._constructor_args) - and key != 'username' and key != 'password') - return ', '.join(options) + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) - def __repr__(self): - return ("MongoClient(%s)" % (self._repr_helper(),)) + def __repr__(self) -> str: + return f"MongoClient({self._repr_helper()})" - def __getattr__(self, name): + def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1548,13 +1578,14 @@ def __getattr__(self, name): :Parameters: - `name`: the name of the database to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( - "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name)) + f"MongoClient has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1565,221 +1596,197 @@ def __getitem__(self, name): """ return database.Database(self, name) - def close_cursor(self, cursor_id, address=None): - """DEPRECATED - Send a kill cursors message soon with the given id. - - Raises :class:`TypeError` if `cursor_id` is not an instance of - ``(int, long)``. What closing the cursor actually means - depends on this client's cursor manager. - - This method may be called from a :class:`~pymongo.cursor.Cursor` - destructor during garbage collection, so it isn't safe to take a - lock or do network I/O. Instead, we schedule the cursor to be closed - soon on a background thread. + def _cleanup_cursor( + self, + locks_allowed: bool, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[ClientSession], + explicit_session: bool, + ) -> None: + """Cleanup a cursor from cursor.close() or __del__. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. :Parameters: - - `cursor_id`: id of cursor to close - - `address` (optional): (host, port) pair of the cursor's server. - If it is not provided, the client attempts to close the cursor on - the primary or standalone, or a mongos server. - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 3.0 - Added ``address`` parameter. - """ - warnings.warn( - "close_cursor is deprecated.", - DeprecationWarning, - stacklevel=2) - if not isinstance(cursor_id, integer_types): - raise TypeError("cursor_id must be an instance of (int, long)") - - self._close_cursor(cursor_id, address) - - def _close_cursor(self, cursor_id, address): - """Send a kill cursors message with the given id. - - What closing the cursor actually means depends on this client's - cursor manager. If there is none, the cursor is closed asynchronously - on a background thread. + - `locks_allowed`: True if we are allowed to acquire locks. + - `cursor_id`: The cursor id which may be 0. + - `address`: The _CursorAddress. + - `conn_mgr`: The _ConnectionManager for the pinned connection or None. + - `session`: The cursor's session. + - `explicit_session`: True if the session was passed explicitly. 
""" - if self.__cursor_manager is not None: - self.__cursor_manager.close(cursor_id, address) + if locks_allowed: + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + conn_mgr.close() else: - self.__kill_cursors_queue.append((address, [cursor_id])) - - def _close_cursor_now(self, cursor_id, address=None, session=None): + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and not explicit_session: + session._end_session(lock=locks_allowed) + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self.__kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[ClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: """Send a kill cursors message with the given id. - What closing the cursor actually means depends on this client's - cursor manager. If there is none, the cursor is closed synchronously - on the current thread. - """ - if not isinstance(cursor_id, integer_types): - raise TypeError("cursor_id must be an instance of (int, long)") - - if self.__cursor_manager is not None: - self.__cursor_manager.close(cursor_id, address) - else: - try: - self._kill_cursors( - [cursor_id], address, self._get_topology(), session) - except PyMongoError: - # Make another attempt to kill the cursor later. - self.__kill_cursors_queue.append((address, [cursor_id])) - - def kill_cursors(self, cursor_ids, address=None): - """DEPRECATED - Send a kill cursors message soon with the given ids. - - Raises :class:`TypeError` if `cursor_ids` is not an instance of - ``list``. - - :Parameters: - - `cursor_ids`: list of cursor ids to kill - - `address` (optional): (host, port) pair of the cursor's server. - If it is not provided, the client attempts to close the cursor on - the primary or standalone, or a mongos server. - - .. versionchanged:: 3.3 - Deprecated. - - .. versionchanged:: 3.0 - Now accepts an `address` argument. Schedules the cursors to be - closed on a background thread instead of sending the message - immediately. + The cursor is closed synchronously on the current thread. """ - warnings.warn( - "kill_cursors is deprecated.", - DeprecationWarning, - stacklevel=2) - - if not isinstance(cursor_ids, list): - raise TypeError("cursor_ids must be a list") - - # "Atomic", needs no lock. - self.__kill_cursors_queue.append((address, cursor_ids)) + if not isinstance(cursor_id, int): + raise TypeError("cursor_id must be an instance of int") - def _kill_cursors(self, cursor_ids, address, topology, session): + try: + if conn_mgr: + with conn_mgr.lock: + # Cursor is pinned to LB outside of a transaction. 
+ assert address is not None + assert conn_mgr.conn is not None + self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + self._kill_cursors([cursor_id], address, self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[ClientSession], + ) -> None: """Send a kill cursors message with the given ids.""" - listeners = self._event_listeners - publish = listeners.enabled_for_commands if address: # address could be a tuple or _CursorAddress, but # select_server_by_address needs (host, port). - server = topology.select_server_by_address(tuple(address)) + server = topology.select_server_by_address(tuple(address)) # type: ignore[arg-type] else: # Application called close_cursor() with no address. server = topology.select_server(writable_server_selector) - try: - namespace = address.namespace - db, coll = namespace.split('.', 1) - except AttributeError: - namespace = None - db = coll = "OP_KILL_CURSORS" - - spec = SON([('killCursors', coll), ('cursors', cursor_ids)]) - with server.get_socket(self.__all_credentials) as sock_info: - if sock_info.max_wire_version >= 4 and namespace is not None: - sock_info.command(db, spec, session=session, client=self) - else: - if publish: - start = datetime.datetime.now() - request_id, msg = message.kill_cursors(cursor_ids) - if publish: - duration = datetime.datetime.now() - start - # Here and below, address could be a tuple or - # _CursorAddress. We always want to publish a - # tuple to match the rest of the monitoring - # API. - listeners.publish_command_start( - spec, db, request_id, tuple(address)) - start = datetime.datetime.now() - - try: - sock_info.send_message(msg, 0) - except Exception as exc: - if publish: - dur = ((datetime.datetime.now() - start) + duration) - listeners.publish_command_failure( - dur, message._convert_exception(exc), - 'killCursors', request_id, - tuple(address)) - raise - - if publish: - duration = ((datetime.datetime.now() - start) + duration) - # OP_KILL_CURSORS returns no reply, fake one. - reply = {'cursorsUnknown': cursor_ids, 'ok': 1} - listeners.publish_command_success( - duration, reply, 'killCursors', request_id, - tuple(address)) - - def _process_kill_cursors(self): + with self._checkout(server, session) as conn: + assert address is not None + self._kill_cursor_impl(cursor_ids, address, session, conn) + + def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[ClientSession], + conn: Connection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = SON([("killCursors", coll), ("cursors", cursor_ids)]) + conn.command(db, spec, session=session, client=self) + + def _process_kill_cursors(self) -> None: """Process any pending kill cursors requests.""" address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] # Other threads or the GC may append to the queue concurrently. 
while True: try: - address, cursor_ids = self.__kill_cursors_queue.pop() + address, cursor_id, conn_mgr = self.__kill_cursors_queue.pop() except IndexError: break - address_to_cursor_ids[address].extend(cursor_ids) + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + self._cleanup_cursor(True, cursor_id, address, conn_mgr, None, False) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + helpers._handle_exception() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: topology = self._get_topology() for address, cursor_ids in address_to_cursor_ids.items(): try: - self._kill_cursors( - cursor_ids, address, topology, session=None) - except Exception: - helpers._handle_exception() + self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + helpers._handle_exception() # This method is run periodically by a background thread. - def _process_periodic_tasks(self): + def _process_periodic_tasks(self) -> None: """Process any pending kill cursors requests and - maintain connection pool parameters.""" - self._process_kill_cursors() + maintain connection pool parameters. + """ try: + self._process_kill_cursors() self._topology.update_pool() - except Exception: - helpers._handle_exception() - - def __start_session(self, implicit, **kwargs): - # Driver Sessions Spec: "If startSession is called when multiple users - # are authenticated drivers MUST raise an error with the error message - # 'Cannot call startSession when multiple users are authenticated.'" - authset = set(self.__all_credentials.values()) - if len(authset) > 1: - raise InvalidOperation("Cannot call start_session when" - " multiple users are authenticated") + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + helpers._handle_exception() + def __start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: # Raises ConfigurationError if sessions are not supported. - server_session = self._get_server_session() + if implicit: + self._topology._check_implicit_session_support() + server_session: Union[_EmptyServerSession, _ServerSession] = _EmptyServerSession() + else: + server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession( - self, server_session, opts, authset, implicit) - - def start_session(self, - causal_consistency=True, - default_transaction_options=None): + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: """Start a logical session. This method takes the same parameters as :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. - Requires MongoDB 3.6. 
It is an error to call :meth:`start_session` - if this client has been authenticated to multiple databases using the - deprecated method :meth:`~pymongo.database.Database.authenticate`. - A :class:`~pymongo.client_session.ClientSession` may only be used with - the MongoClient that started it. + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. @@ -1789,17 +1796,23 @@ def start_session(self, return self.__start_session( False, causal_consistency=causal_consistency, - default_transaction_options=default_transaction_options) + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) - def _get_server_session(self): + def _get_server_session(self) -> _ServerSession: """Internal: start or resume a _ServerSession.""" return self._topology.get_server_session() - def _return_server_session(self, server_session, lock): + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool + ) -> None: """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None return self._topology.return_server_session(server_session, lock) - def _ensure_session(self, session=None): + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: """If provided session is None, lend a temporary session.""" if session: return session @@ -1809,51 +1822,60 @@ def _ensure_session(self, session=None): # should always opt-in. return self.__start_session(True, causal_consistency=False) except (ConfigurationError, InvalidOperation): - # Sessions not supported, or multiple users authenticated. + # Sessions not supported. return None @contextlib.contextmanager - def _tmp_session(self, session, close=True): + def _tmp_session( + self, session: Optional[client_session.ClientSession], close: bool = True + ) -> Generator[Optional[client_session.ClientSession], None, None]: """If provided session is None, lend a temporary session.""" - if session: + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError("'session' argument must be a ClientSession or None.") # Don't call end_session. yield session return s = self._ensure_session(session) - if s and close: - with s: - # Call end_session when we exit this scope. - yield s - elif s: + if s: try: - # Only call end_session on error. yield s - except Exception: + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. s.end_session() raise + finally: + # Call end_session when we exit this scope. 
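From user code, the explicit-session path that ``_tmp_session`` falls back from looks like this; a hedged sketch assuming a sessions-capable deployment at a hypothetical address:

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
# ClientSession is not thread- or fork-safe; use it from one thread at a time.
with client.start_session(causal_consistency=True) as s:
    client.db.coll.insert_one({"x": 1}, session=s)
    print(client.db.coll.find_one({"x": 1}, session=s))
# end_session is invoked automatically when the with-block exits.
```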
+ if close: + s.end_session() else: yield None - def _send_cluster_time(self, command, session): + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[ClientSession] + ) -> None: topology_time = self._topology.max_cluster_time() session_time = session.cluster_time if session else None if topology_time and session_time: - if topology_time['clusterTime'] > session_time['clusterTime']: - cluster_time = topology_time + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time else: cluster_time = session_time else: cluster_time = topology_time or session_time if cluster_time: - command['$clusterTime'] = cluster_time + command["$clusterTime"] = cluster_time - def _process_response(self, reply, session): - self._topology.receive_cluster_time(reply.get('$clusterTime')) + def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: + self._topology.receive_cluster_time(reply.get("$clusterTime")) if session is not None: session._process_response(reply) - def server_info(self, session=None): + def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: """Get information about the MongoDB server we're connected to. :Parameters: @@ -1863,22 +1885,33 @@ def server_info(self, session=None): .. versionchanged:: 3.6 Added ``session`` parameter. """ - return self.admin.command("buildinfo", - read_preference=ReadPreference.PRIMARY, - session=session) - - def list_databases(self, session=None, **kwargs): + return cast( + dict, + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + def list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: """Get a cursor over the databases of the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listDatabases command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. + :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. @@ -1886,6 +1919,8 @@ def list_databases(self, session=None, **kwargs): """ cmd = SON([("listDatabases", 1)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment admin = self._database_default_options("admin") res = admin._retryable_read_command(cmd, session=session) # listDatabases doesn't return a cursor (yet). Fake one. @@ -1894,44 +1929,39 @@ def list_databases(self, session=None, **kwargs): "firstBatch": res["databases"], "ns": "admin.$cmd", } - return CommandCursor(admin["$cmd"], cursor, None) + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) - def list_database_names(self, session=None): + def list_database_names( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> list[str]: """Get a list of the names of all databases on the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - .. 
versionadded:: 3.6 - """ - return [doc["name"] - for doc in self.list_databases(session, nameOnly=True)] - - def database_names(self, session=None): - """**DEPRECATED**: Get a list of the names of all databases on the - connected server. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.7 - Deprecated. Use :meth:`list_database_names` instead. + .. versionchanged:: 4.1 + Added ``comment`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. + .. versionadded:: 3.6 """ - warnings.warn("database_names is deprecated. Use list_database_names " - "instead.", DeprecationWarning, stacklevel=2) - return self.list_database_names(session) - - def drop_database(self, name_or_database, session=None): + return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] + + @_csot.apply + def drop_database( + self, + name_or_database: Union[str, database.Database[_DocumentTypeArg]], + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> None: """Drop a database. Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`basestring` (:class:`str` in python 3) or - :class:`~pymongo.database.Database`. + :class:`str` or :class:`~pymongo.database.Database`. :Parameters: - `name_or_database`: the name of a database to drop, or a @@ -1939,13 +1969,17 @@ def drop_database(self, name_or_database, session=None): database to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of - this client is automatically applied to this operation when using - MongoDB >= 3.4. + this client is automatically applied to this operation. .. versionchanged:: 3.4 Apply this client's write concern automatically to this operation @@ -1956,22 +1990,27 @@ def drop_database(self, name_or_database, session=None): if isinstance(name, database.Database): name = name.name - if not isinstance(name, string_type): - raise TypeError("name_or_database must be an instance " - "of %s or a Database" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name_or_database must be an instance of str or a Database") - self._purge_index(name) - with self._socket_for_writes(session) as sock_info: + with self._conn_for_writes(session) as conn: self[name]._command( - sock_info, - "dropDatabase", + conn, + {"dropDatabase": 1, "comment": comment}, read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), parse_write_concern_error=True, - session=session) - - def get_default_database(self, default=None, codec_options=None, - read_preference=None, write_concern=None, read_concern=None): + session=session, + ) + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: """Get the database named in the MongoDB connection URI. >>> uri = 'mongodb://host/my_database' @@ -2003,6 +2042,11 @@ def get_default_database(self, default=None, codec_options=None, :class:`~pymongo.read_concern.ReadConcern`. 
If ``None`` (the default) the :attr:`read_concern` of this :class:`MongoClient` is used. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.8 Undeprecated. Added the ``default``, ``codec_options``, @@ -2013,15 +2057,21 @@ def get_default_database(self, default=None, codec_options=None, Deprecated, use :meth:`get_database` instead. """ if self.__default_database_name is None and default is None: - raise ConfigurationError( - 'No default database name defined or provided.') + raise ConfigurationError("No default database name defined or provided.") + name = cast(str, self.__default_database_name or default) return database.Database( - self, self.__default_database_name or default, codec_options, - read_preference, write_concern, read_concern) - - def get_database(self, name=None, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -2067,160 +2117,348 @@ def get_database(self, name=None, codec_options=None, read_preference=None, """ if name is None: if self.__default_database_name is None: - raise ConfigurationError('No default database defined') + raise ConfigurationError("No default database defined") name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, - write_concern, read_concern) + self, name, codec_options, read_preference, write_concern, read_concern + ) - def _database_default_options(self, name): + def _database_default_options(self, name: str) -> Database: """Get a Database instance with the default settings.""" return self.get_database( - name, codec_options=DEFAULT_CODEC_OPTIONS, + name, + codec_options=DEFAULT_CODEC_OPTIONS, read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN) + write_concern=DEFAULT_WRITE_CONCERN, + ) - @property - def is_locked(self): - """Is this server locked? While locked, all write operations - are blocked, although read operations may still be allowed. - Use :meth:`unlock` to unlock. - """ - ops = self._database_default_options('admin')._current_op() - return bool(ops.get('fsyncLock', 0)) + def __enter__(self) -> MongoClient[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() - def fsync(self, **kwargs): - """Flush all pending writes to datafiles. + # See PYTHON-3084. + __iter__ = None - Optional parameters can be passed as keyword arguments: - - `lock`: If True lock the server to disallow writes. - - `async`: If True don't block while synchronizing. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") - .. note:: Starting with Python 3.7 `async` is a reserved keyword. - The async option to the fsync command can be passed using a - dictionary instead:: + next = __next__ - options = {'async': True} - client.fsync(**options) - .. versionchanged:: 3.6 - Added ``session`` parameter. 
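A brief usage sketch of the database accessors above (connection string and database names are illustrative):

    from pymongo import MongoClient, ReadPreference

    client = MongoClient("mongodb://localhost:27017/my_database")
    db = client.get_default_database()  # "my_database", taken from the URI
    reporting = client.get_database("reporting", read_preference=ReadPreference.SECONDARY)
    client.drop_database("scratch", comment="nightly cleanup")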
+def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]:
+    """Return the server response from PyMongo exception or None."""
+    if isinstance(exc, BulkWriteError):
+        # Check the last writeConcernError to determine if this
+        # BulkWriteError is retryable.
+        wces = exc.details["writeConcernErrors"]
+        return wces[-1] if wces else None
+    if isinstance(exc, (NotPrimaryError, OperationFailure)):
+        return cast(Mapping[str, Any], exc.details)
+    return None
+
+
+def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int) -> None:
+    doc = _retryable_error_doc(exc)
+    if doc:
+        code = doc.get("code", 0)
+        # retryWrites on MMAPv1 should raise an actionable error.
+        if code == 20 and str(exc).startswith("Transaction numbers"):
+            errmsg = (
+                "This MongoDB deployment does not support "
+                "retryable writes. Please add retryWrites=false "
+                "to your connection string."
+            )
+            raise OperationFailure(errmsg, code, exc.details)  # type: ignore[attr-defined]
+        if max_wire_version >= 9:
+            # In MongoDB 4.4+, the server reports the error labels.
+            for label in doc.get("errorLabels", []):
+                exc._add_error_label(label)
+        else:
+            if code in helpers._RETRYABLE_ERROR_CODES:
+                exc._add_error_label("RetryableWriteError")
+
+    # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError, which are
+    # handled above.
+    if isinstance(exc, ConnectionFailure) and not isinstance(
+        exc, (NotPrimaryError, WaitQueueTimeoutError)
+    ):
+        exc._add_error_label("RetryableWriteError")
+
+
+class _MongoClientErrorHandler:
+    """Handle errors raised when executing an operation."""
+
+    __slots__ = (
+        "client",
+        "server_address",
+        "session",
+        "max_wire_version",
+        "sock_generation",
+        "completed_handshake",
+        "service_id",
+        "handled",
+    )
+
+    def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]):
+        self.client = client
+        self.server_address = server.description.address
+        self.session = session
+        self.max_wire_version = common.MIN_WIRE_VERSION
+        # XXX: When get_socket fails, this generation could be out of date:
+        # "Note that when a network error occurs before the handshake
+        # completes then the error's generation number is the generation
+        # of the pool at the time the connection attempt was started."
+ self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + self.session._unpin() + err_ctx = _ErrorContext( + exc_val, + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + self.client._topology.handle_error(self.server_address, err_ctx) + + def __enter__(self) -> _MongoClientErrorHandler: + return self - .. warning:: `async` and `lock` can not be used together. + def __exit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return self.handle(exc_type, exc_val) - .. warning:: MongoDB does not support the `async` option - on Windows and will raise an exception on that - platform. - """ - self.admin.command("fsync", - read_preference=ReadPreference.PRIMARY, **kwargs) - def unlock(self, session=None): - """Unlock a previously locked server. +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. + def __init__( + self, + mongo_client: MongoClient, + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[_Bulk], + is_read: bool = False, + session: Optional[ClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore - .. versionchanged:: 3.6 - Added ``session`` parameter. 
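Before the retry loop itself, a note on the observable behavior it implements: retryable writes and reads are on by default, and (per ``_multiple_retries`` above) the loop retries at most once unless a CSOT timeout budget permits further attempts. A hedged sketch of the client-side knobs (URI values are illustrative):

    # Disable retryable writes/reads per client if desired:
    client = MongoClient("mongodb://localhost:27017/?retryWrites=false&retryReads=false")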
+    def run(self) -> T:
+        """Runs the supplied func() and attempts a retry
+
+        :Raises:
+            self._last_error: Last exception raised
+
+        :Returns:
+            Result of the func() call
         """
-        cmd = SON([("fsyncUnlock", 1)])
-        with self._socket_for_writes(session) as sock_info:
-            if sock_info.max_wire_version >= 4:
-                try:
-                    with self._tmp_session(session) as s:
-                        sock_info.command(
-                            "admin", cmd, session=s, client=self)
-                except OperationFailure as exc:
-                    # Ignore "DB not locked" to replicate old behavior
-                    if exc.code != 125:
+        # Increment the transaction id up front to ensure any retry attempt
+        # will use the proper txnNumber, even if server or socket selection
+        # fails before the command can be sent.
+        if self._is_session_state_retryable() and self._retryable and not self._is_read:
+            self._session._start_retryable_write()  # type: ignore
+            if self._bulk:
+                self._bulk.started_retryable_write = True
+
+        while True:
+            self._check_last_error(check_csot=True)
+            try:
+                return self._read() if self._is_read else self._write()
+            except ServerSelectionTimeoutError:
+                # The application may think the write was never attempted
+                # if we raise ServerSelectionTimeoutError on the retry
+                # attempt. Raise the original exception instead.
+                self._check_last_error()
+                # A ServerSelectionTimeoutError indicates that there may
+                # be a persistent outage. Attempting to retry in this case will
+                # most likely be a waste of time.
+                raise
+            except PyMongoError as exc:
+                # Execute specialized catch on read
+                if self._is_read:
+                    if isinstance(exc, (ConnectionFailure, OperationFailure)):
+                        # ConnectionFailures do not supply a code property
+                        exc_code = getattr(exc, "code", None)
+                        if self._is_not_eligible_for_retry() or (
+                            isinstance(exc, OperationFailure)
+                            and exc_code not in helpers._RETRYABLE_ERROR_CODES
+                        ):
+                            raise
+                        self._retrying = True
+                        self._last_error = exc
+                    else:
                         raise
-            else:
-                message._first_batch(sock_info, "admin", "$cmd.sys.unlock",
-                                     {}, -1, True, self.codec_options,
-                                     ReadPreference.PRIMARY, cmd,
-                                     self._event_listeners)

-    def __enter__(self):
-        return self
+                # Specialized catch on write operation
+                if not self._is_read:
+                    if not self._retryable:
+                        raise
+                    retryable_write_error_exc = exc.has_error_label("RetryableWriteError")
+                    if retryable_write_error_exc:
+                        assert self._session
+                        self._session._unpin()
+                    if not retryable_write_error_exc or self._is_not_eligible_for_retry():
+                        if exc.has_error_label("NoWritesPerformed") and self._last_error:
+                            raise self._last_error from exc
+                        else:
+                            raise
+                    if self._bulk:
+                        self._bulk.retrying = True
+                    else:
+                        self._retrying = True
+                    if not exc.has_error_label("NoWritesPerformed"):
+                        self._last_error = exc
+                    if self._last_error is None:
+                        self._last_error = exc
+
+    def _is_not_eligible_for_retry(self) -> bool:
+        """Checks if the exchange is not eligible for retry"""
+        return not self._retryable or (self._is_retrying() and not self._multiple_retries)
+
+    def _is_retrying(self) -> bool:
+        """Checks if the exchange is currently undergoing a retry"""
+        return self._bulk.retrying if self._bulk else self._retrying
+
+    def _is_session_state_retryable(self) -> bool:
+        """Checks if provided session is eligible for retry
+
+        reads: Make sure there is no ongoing transaction (if provided a session)
+        writes: Make sure there is a session without an active transaction
+        """
+        if self._is_read:
+            return not (self._session and self._session.in_transaction)
+        return bool(self._session and not self._session.in_transaction)

-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.close()
+    def _check_last_error(self, check_csot: bool = False) -> None:
+        """Checks if the ongoing client exchange experienced an exception previously.
+        If so, raise last error

-    def __iter__(self):
-        return self
+        :Parameters:
+          - `check_csot`: Checks CSOT to ensure we are retrying with time remaining; defaults to False
+        """
+        if self._is_retrying():
+            remaining = _csot.remaining()
+            if not check_csot or (remaining is not None and remaining <= 0):
+                assert self._last_error is not None
+                raise self._last_error

-    def __next__(self):
-        raise TypeError("'MongoClient' object is not iterable")
+    def _get_server(self) -> Server:
+        """Retrieves a server object based on provided object context

-    next = __next__
+        :Returns:
+            Abstraction to connect to server
+        """
+        return self._client._select_server(
+            self._server_selector, self._session, address=self._address
+        )

+    def _write(self) -> T:
+        """Wrapper method for write-type retryable client executions

-class _MongoClientErrorHandler(object):
-    """Error handler for MongoClient."""
-    __slots__ = ('_client', '_server_address', '_session', '_max_wire_version')
+        :Returns:
+            Output for func()'s call
+        """
+        try:
+            max_wire_version = 0
+            self._server = self._get_server()
+            supports_session = (
+                self._session is not None and self._server.description.retryable_writes_supported
+            )
+            with self._client._checkout(self._server, self._session) as conn:
+                max_wire_version = conn.max_wire_version
+                if self._retryable and not supports_session:
+                    # A retry is not possible because this server does
+                    # not support sessions; raise the last error.
+                    self._check_last_error()
+                    self._retryable = False
+                return self._func(self._session, conn, self._retryable)  # type: ignore
+        except PyMongoError as exc:
+            if not self._retryable:
+                raise
+            # Add the RetryableWriteError label, if applicable.
+            _add_retryable_write_error(exc, max_wire_version)
+            raise

-    def __init__(self, client, server_address, session):
-        self._client = client
-        self._server_address = server_address
-        self._session = session
-        self._max_wire_version = None
+    def _read(self) -> T:
+        """Wrapper method for read-type retryable client executions

-    def contribute_socket(self, sock_info):
-        """Provide socket information to the error handler."""
-        # Currently, we only extract the max_wire_version information.
-        self._max_wire_version = sock_info.max_wire_version
+        :Returns:
+            Output for func()'s call
+        """
+        self._server = self._get_server()
+        assert self._read_pref is not None, "Read Preference required on read calls"
+        with self._client._conn_from_server(self._read_pref, self._server, self._session) as (
+            conn,
+            read_pref,
+        ):
+            if self._retrying and not self._retryable:
+                self._check_last_error()
+            return self._func(self._session, self._server, conn, read_pref)  # type: ignore
+
+
+def _after_fork_child() -> None:
+    """Releases the locks in child process and resets the
+    topologies in all MongoClients.
+    """
+    # Reinitialize locks
+    _release_locks()

-    def __enter__(self):
-        return self
+    # Perform cleanup in clients (i.e. get rid of topology)
+    for _, client in MongoClient._clients.items():
+        client._after_fork()

-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if exc_type is None:
-            return
-        if issubclass(exc_type, PyMongoError):
-            if self._session and exc_val.has_error_label(
-                    "TransientTransactionError"):
-                self._session._unpin_mongos()
-
-        if issubclass(exc_type, NetworkTimeout):
-            # The socket has been closed. Don't reset the server.
- # Server Discovery And Monitoring Spec: "When an application - # operation fails because of any network error besides a socket - # timeout...." - if self._session: - self._session._server_session.mark_dirty() - elif issubclass(exc_type, NotMasterError): - # As per the SDAM spec if: - # - the server sees a "not master" error, and - # - the server is not shutting down, and - # - the server version is >= 4.2, then - # we keep the existing connection pool, but mark the server type - # as Unknown and request an immediate check of the server. - # Otherwise, we clear the connection pool, mark the server as - # Unknown and request an immediate check of the server. - err_code = exc_val.details.get('code', -1) - is_shutting_down = err_code in helpers._SHUTDOWN_CODES - if (is_shutting_down or (self._max_wire_version is None) or - (self._max_wire_version <= 7)): - # Clear the pool, mark server Unknown and request check. - self._client._reset_server_and_request_check( - self._server_address) - else: - self._client._topology.mark_server_unknown_and_request_check( - self._server_address) - elif issubclass(exc_type, ConnectionFailure): - # "Client MUST replace the server's description with type Unknown - # ... MUST NOT request an immediate check of the server." - self._client._reset_server(self._server_address) - if self._session: - self._session._server_session.mark_dirty() - elif issubclass(exc_type, OperationFailure): - # Do not request an immediate check since the server is likely - # shutting down. - if exc_val.code in helpers._RETRYABLE_ERROR_CODES: - self._client._reset_server(self._server_address) +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/mongo_replica_set_client.py b/pymongo/mongo_replica_set_client.py deleted file mode 100644 index c9436c24e5..0000000000 --- a/pymongo/mongo_replica_set_client.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2011-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Deprecated. See :doc:`/examples/high_availability`.""" - -import warnings - -from pymongo import mongo_client - - -class MongoReplicaSetClient(mongo_client.MongoClient): - """Deprecated alias for :class:`~pymongo.mongo_client.MongoClient`. - - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - will be removed in a future version of PyMongo. - - .. versionchanged:: 3.0 - :class:`~pymongo.mongo_client.MongoClient` is now the one and only - client class for a standalone server, mongos, or replica set. - It includes the functionality that had been split into - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: it - can connect to a replica set, discover all its members, and monitor - the set for stepdowns, elections, and reconfigs. 
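Since the ``os.register_at_fork`` call above may be unfamiliar: it is a standard-library hook (Python 3.7+, available only on platforms that support ``fork``) that runs callbacks around ``os.fork``. A standalone illustration, independent of PyMongo:

    import os

    def _in_child() -> None:
        # Runs in the child process immediately after a fork.
        print("fork detected in child, pid:", os.getpid())

    if hasattr(os, "register_at_fork"):
        os.register_at_fork(after_in_child=_in_child)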
- - The ``refresh`` method is removed from - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`, - as are the ``seeds`` and ``hosts`` properties. - """ - def __init__(self, *args, **kwargs): - warnings.warn('MongoReplicaSetClient is deprecated, use MongoClient' - ' to connect to a replica set', - DeprecationWarning, stacklevel=2) - - super(MongoReplicaSetClient, self).__init__(*args, **kwargs) - - def __repr__(self): - return "MongoReplicaSetClient(%s)" % (self._repr_helper(),) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 23af967ffe..92b12f7317 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -14,52 +14,106 @@ """Class to monitor a MongoDB server on a background thread.""" +from __future__ import annotations + +import atexit +import time import weakref +from typing import TYPE_CHECKING, Any, Mapping, Optional, cast from pymongo import common, periodic_executor -from pymongo.errors import OperationFailure -from pymongo.monotonic import time as _time +from pymongo._csot import MovingMinimum +from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled +from pymongo.hello import Hello +from pymongo.lock import _create_lock +from pymongo.periodic_executor import _shutdown_executors +from pymongo.pool import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.server_type import SERVER_TYPE from pymongo.srv_resolver import _SrvResolver +if TYPE_CHECKING: + from pymongo.pool import Connection, Pool, _CancellationContext + from pymongo.settings import TopologySettings + from pymongo.topology import Topology + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. + def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + monitor._run() # type:ignore[attr-defined] + return True -class MonitorBase(object): - def __init__(self, *args, **kwargs): - """Override this method to create an executor.""" - raise NotImplementedError + executor = periodic_executor.PeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) - def open(self): + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: """Start monitoring, or restart after a fork. Multiple calls have no effect. """ self._executor.open() - def close(self): + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + def close(self) -> None: """Close and stop monitoring. open() restarts the monitor after closing. 
""" - self._executor.close() + self.gc_safe_close() - def join(self, timeout=None): + def join(self, timeout: Optional[int] = None) -> None: """Wait for the monitor to stop.""" self._executor.join(timeout) - def request_check(self): + def request_check(self) -> None: """If the monitor is sleeping, wake it soon.""" self._executor.wake() class Monitor(MonitorBase): def __init__( - self, - server_description, - topology, - pool, - topology_settings): + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): """Class to monitor a MongoDB server on a background thread. Pass an initial ServerDescription, a Topology, a Pool, and @@ -68,167 +122,218 @@ def __init__( The Topology is weakly referenced. The Pool must be exclusive to this Monitor. """ + super().__init__( + topology, + "pymongo_server_monitor_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) self._server_description = server_description self._pool = pool self._settings = topology_settings - self._avg_round_trip_time = MovingAverage() - self._listeners = self._settings._pool_options.event_listeners - pub = self._listeners is not None - self._publish = pub and self._listeners.enabled_for_server_heartbeat - - # We strongly reference the executor and it weakly references us via - # this closure. When the monitor is freed, stop the executor soon. - def target(): - monitor = self_ref() - if monitor is None: - return False # Stop the executor. - Monitor._run(monitor) - return True - - executor = periodic_executor.PeriodicExecutor( - interval=self._settings.heartbeat_frequency, - min_interval=common.MIN_HEARTBEAT_INTERVAL, - target=target, - name="pymongo_server_monitor_thread") - - self._executor = executor + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() - # Avoid cycles. When self or topology is freed, stop executor soon. - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) + def cancel_check(self) -> None: + """Cancel any concurrent hello check. - def close(self): - super(Monitor, self).close() + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. + self._rtt_monitor.open() + if self._executor._stopped: + self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() - # Increment the pool_id and maybe close the socket. 
If the executor + def close(self) -> None: + self.gc_safe_close() + self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor # thread has the socket checked out, it will be closed when checked in. + self._reset_connection() + + def _reset_connection(self) -> None: + # Clear our pooled connection. self._pool.reset() - def _run(self): + def _run(self) -> None: try: - self._server_description = self._check_with_retry() - self._topology.on_change(self._server_description) + prev_sd = self._server_description + try: + self._server_description = self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + self._topology.on_change( + self._server_description, reset_pool=self._server_description.error + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() except ReferenceError: # Topology was garbage-collected. self.close() - def _check_with_retry(self): - """Call ismaster once or twice. Reset server's pool on error. + def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. Returns a ServerDescription. """ - # According to the spec, if an ismaster call fails we reset the - # server's pool. If a server was once connected, change its type - # to Unknown only after retrying once. - address = self._server_description.address - retry = True - if self._server_description.server_type == SERVER_TYPE.Unknown: - retry = False - - start = _time() + start = time.monotonic() try: - return self._check_once() + try: + return self._check_once() + except (OperationFailure, NotPrimaryError) as exc: + # Update max cluster time even when hello fails. + details = cast(Mapping[str, Any], exc.details) + self._topology.receive_cluster_time(details.get("$clusterTime")) + raise except ReferenceError: raise except Exception as error: - error_time = _time() - start + _sanitize(error) + sd = self._server_description + address = sd.address + duration = time.monotonic() - start if self._publish: - self._listeners.publish_server_heartbeat_failed( - address, error_time, error) - self._topology.reset_pool(address) - default = ServerDescription(address, error=error) - if not retry: - self._avg_round_trip_time.reset() - # Server type defaults to Unknown. - return default - - # Try a second and final time. If it fails return original error. - # Always send metadata: this is a new connection. 
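To make the failure path above concrete: when a check raises, the monitor publishes a ServerDescription that wraps the error and therefore keeps the default Unknown server type (the address and exception below are illustrative):

    from pymongo.server_description import ServerDescription

    sd = ServerDescription(("localhost", 27017), error=OSError("connection timed out"))
    # sd.server_type stays Unknown; sd.error carries the exception for the
    # topology to act on (pool reset, rediscovery).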
- start = _time() - try: - return self._check_once() - except ReferenceError: + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + self._reset_connection() + if isinstance(error, _OperationCancelled): raise - except Exception as error: - error_time = _time() - start - if self._publish: - self._listeners.publish_server_heartbeat_failed( - address, error_time, error) - self._avg_round_trip_time.reset() - return default + self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) - def _check_once(self): - """A single attempt to call ismaster. + def _check_once(self) -> ServerDescription: + """A single attempt to call hello. Returns a ServerDescription, or raises an exception. """ address = self._server_description.address if self._publish: - self._listeners.publish_server_heartbeat_started(address) - with self._pool.get_socket({}) as sock_info: - response, round_trip_time = self._check_with_socket(sock_info) - self._avg_round_trip_time.add_sample(round_trip_time) - sd = ServerDescription( - address=address, - ismaster=response, - round_trip_time=self._avg_round_trip_time.get()) + assert self._listeners is not None + sd = self._server_description + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns + and self._stream + and sd.is_server_type_known + and sd.topology_version + ) + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + self._reset_connection() + with self._pool.checkout() as conn: + self._cancel_context = conn.cancel_context + response, round_trip_time = self._check_with_socket(conn) + if not response.awaitable: + self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) if self._publish: + assert self._listeners is not None self._listeners.publish_server_heartbeat_succeeded( - address, round_trip_time, response) - + address, round_trip_time, response, response.awaitable + ) return sd - def _check_with_socket(self, sock_info): - """Return (IsMaster, round_trip_time). + def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: + """Return (Hello, round_trip_time). Can raise ConnectionFailure or OperationFailure. """ - start = _time() - try: - return (sock_info.ismaster(self._pool.opts.metadata, - self._topology.max_cluster_time()), - _time() - start) - except OperationFailure as exc: - # Update max cluster time even when isMaster fails. - self._topology.receive_cluster_time( - exc.details.get('$clusterTime')) - raise + cluster_time = self._topology.max_cluster_time() + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = conn._hello( + cluster_time, + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). 
+ response = conn._hello(cluster_time, None, None) + return response, time.monotonic() - start class SrvMonitor(MonitorBase): - def __init__(self, topology, topology_settings): + def __init__(self, topology: Topology, topology_settings: TopologySettings): """Class to poll SRV records on a background thread. Pass a Topology and a TopologySettings. The Topology is weakly referenced. """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) self._settings = topology_settings self._seedlist = self._settings._seeds - self._fqdn = self._settings.fqdn - - # We strongly reference the executor and it weakly references us via - # this closure. When the monitor is freed, stop the executor soon. - def target(): - monitor = self_ref() - if monitor is None: - return False # Stop the executor. - SrvMonitor._run(monitor) - return True + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn - executor = periodic_executor.PeriodicExecutor( - interval=common.MIN_SRV_RESCAN_INTERVAL, - min_interval=self._settings.heartbeat_frequency, - target=target, - name="pymongo_srv_polling_thread") - - self._executor = executor - - # Avoid cycles. When self or topology is freed, stop executor soon. - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) - - def _run(self): + def _run(self) -> None: seedlist = self._get_seedlist() if seedlist: self._seedlist = seedlist @@ -238,13 +343,18 @@ def _run(self): # Topology was garbage-collected. self.close() - def _get_seedlist(self): + def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: """Poll SRV records for a seedlist. Returns a list of ServerDescriptions. """ try: - seedlist, ttl = _SrvResolver(self._fqdn).get_hosts_and_min_ttl() + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) + seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception @@ -256,6 +366,113 @@ def _get_seedlist(self): self.request_check() return None else: - self._executor.update_interval( - max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_server_rtt_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + + self._pool = pool + self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() + self._lock = _create_lock() + + def close(self) -> None: + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. 
+        self._pool.reset()
+
+    def add_sample(self, sample: float) -> None:
+        """Add an RTT sample."""
+        with self._lock:
+            self._moving_average.add_sample(sample)
+            self._moving_min.add_sample(sample)
+
+    def get(self) -> tuple[Optional[float], float]:
+        """Get the calculated average (or None if there are no samples yet) and the minimum."""
+        with self._lock:
+            return self._moving_average.get(), self._moving_min.get()
+
+    def reset(self) -> None:
+        """Reset the average RTT."""
+        with self._lock:
+            self._moving_average.reset()
+            self._moving_min.reset()
+
+    def _run(self) -> None:
+        try:
+            # NOTE: This thread is only run when using the streaming
+            # heartbeat protocol (MongoDB 4.4+).
+            # XXX: Skip check if the server is unknown?
+            rtt = self._ping()
+            self.add_sample(rtt)
+        except ReferenceError:
+            # Topology was garbage-collected.
+            self.close()
+        except Exception:
+            self._pool.reset()
+
+    def _ping(self) -> float:
+        """Run a "hello" command and return the RTT."""
+        with self._pool.checkout() as conn:
+            if self._executor._stopped:
+                raise Exception("_RttMonitor closed")
+            start = time.monotonic()
+            conn.hello()
+            return time.monotonic() - start
+
+
+# Close monitors to cancel any in-progress streaming checks before joining
+# executor threads. For an explanation of how this works see the comment
+# about _EXECUTORS in periodic_executor.py.
+_MONITORS = set()
+
+
+def _register(monitor: MonitorBase) -> None:
+    ref = weakref.ref(monitor, _unregister)
+    _MONITORS.add(ref)
+
+
+def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None:
+    _MONITORS.remove(monitor_ref)
+
+
+def _shutdown_monitors() -> None:
+    if _MONITORS is None:
+        return
+
+    # Copy the set. Closing monitors removes them.
+    monitors = list(_MONITORS)
+
+    # Close all monitors.
+    for ref in monitors:
+        monitor = ref()
+        if monitor:
+            monitor.gc_safe_close()
+
+    monitor = None
+
+
+def _shutdown_resources() -> None:
+    # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown.
+    shutdown = _shutdown_monitors
+    if shutdown:  # type:ignore[truthy-function]
+        shutdown()
+    shutdown = _shutdown_executors
+    if shutdown:  # type:ignore[truthy-function]
+        shutdown()
+
+
+atexit.register(_shutdown_resources)
diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py
index 103d0d3ee0..03b3c53180 100644
--- a/pymongo/monitoring.py
+++ b/pymongo/monitoring.py
@@ -16,6 +16,10 @@

 .. versionadded:: 3.1

+.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below
+  are included in the PyMongo distribution under the
+  :mod:`~pymongo.event_loggers` submodule.
+
 Use :func:`register` to register global listeners for specific events.
 Listeners must inherit from one of the abstract classes below and implement
 the correct functions for that class.
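As a concrete example of the registration flow just described, a minimal command listener (the class name and log text are illustrative):

    import logging
    from pymongo import monitoring

    class CommandLogger(monitoring.CommandListener):
        def started(self, event):
            logging.info("command %s started", event.command_name)

        def succeeded(self, event):
            logging.info("command %s took %d us", event.command_name, event.duration_micros)

        def failed(self, event):
            logging.info("command %s failed", event.command_name)

    monitoring.register(CommandLogger())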
@@ -121,6 +125,9 @@ class ConnectionPoolLogger(ConnectionPoolListener): def pool_created(self, event): logging.info("[pool {0.address}] pool created".format(event)) + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + def pool_cleared(self, event): logging.info("[pool {0.address}] pool cleared".format(event)) @@ -128,15 +135,15 @@ def pool_closed(self, event): logging.info("[pool {0.address}] pool closed".format(event)) def connection_created(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection created".format(event)) def connection_ready(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection setup succeeded".format(event)) def connection_closed(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection closed, reason: " "{0.reason}".format(event)) @@ -149,11 +156,11 @@ def connection_check_out_failed(self, event): "failed, reason: {0.reason}".format(event)) def connection_checked_out(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection checked out of pool".format(event)) def connection_checked_in(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection checked into pool".format(event)) @@ -176,20 +183,39 @@ def connection_checked_in(self, event): handler first. """ -from collections import namedtuple +from __future__ import annotations + +import datetime +from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence -from bson.py3compat import abc +from bson.objectid import ObjectId +from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_exception +from pymongo.typings import _Address, _DocumentOut + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription -_Listeners = namedtuple('Listeners', - ('command_listeners', 'server_listeners', - 'server_heartbeat_listeners', 'topology_listeners', - 'cmap_listeners')) + +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) _LISTENERS = _Listeners([], [], [], [], []) -class _EventListener(object): +class _EventListener: """Abstract base class for all event listeners.""" @@ -200,7 +226,7 @@ class CommandListener(_EventListener): and `CommandFailedEvent`. """ - def started(self, event): + def started(self, event: CommandStartedEvent) -> None: """Abstract method to handle a `CommandStartedEvent`. :Parameters: @@ -208,7 +234,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: CommandSucceededEvent) -> None: """Abstract method to handle a `CommandSucceededEvent`. :Parameters: @@ -216,7 +242,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: CommandFailedEvent) -> None: """Abstract method to handle a `CommandFailedEvent`. 
:Parameters: @@ -241,50 +267,62 @@ class ConnectionPoolListener(_EventListener): .. versionadded:: 3.9 """ - def pool_created(self, event): + def pool_created(self, event: PoolCreatedEvent) -> None: """Abstract method to handle a :class:`PoolCreatedEvent`. - Emitted when a Connection Pool is created. + Emitted when a connection Pool is created. :Parameters: - `event`: An instance of :class:`PoolCreatedEvent`. """ raise NotImplementedError - def pool_cleared(self, event): + def pool_ready(self, event: PoolReadyEvent) -> None: + """Abstract method to handle a :class:`PoolReadyEvent`. + + Emitted when a connection Pool is marked ready. + + :Parameters: + - `event`: An instance of :class:`PoolReadyEvent`. + + .. versionadded:: 4.0 + """ + raise NotImplementedError + + def pool_cleared(self, event: PoolClearedEvent) -> None: """Abstract method to handle a `PoolClearedEvent`. - Emitted when a Connection Pool is cleared. + Emitted when a connection Pool is cleared. :Parameters: - `event`: An instance of :class:`PoolClearedEvent`. """ raise NotImplementedError - def pool_closed(self, event): + def pool_closed(self, event: PoolClosedEvent) -> None: """Abstract method to handle a `PoolClosedEvent`. - Emitted when a Connection Pool is closed. + Emitted when a connection Pool is closed. :Parameters: - `event`: An instance of :class:`PoolClosedEvent`. """ raise NotImplementedError - def connection_created(self, event): + def connection_created(self, event: ConnectionCreatedEvent) -> None: """Abstract method to handle a :class:`ConnectionCreatedEvent`. - Emitted when a Connection Pool creates a Connection object. + Emitted when a connection Pool creates a Connection object. :Parameters: - `event`: An instance of :class:`ConnectionCreatedEvent`. """ raise NotImplementedError - def connection_ready(self, event): + def connection_ready(self, event: ConnectionReadyEvent) -> None: """Abstract method to handle a :class:`ConnectionReadyEvent`. - Emitted when a Connection has finished its setup, and is now ready to + Emitted when a connection has finished its setup, and is now ready to use. :Parameters: @@ -292,17 +330,17 @@ def connection_ready(self, event): """ raise NotImplementedError - def connection_closed(self, event): + def connection_closed(self, event: ConnectionClosedEvent) -> None: """Abstract method to handle a :class:`ConnectionClosedEvent`. - Emitted when a Connection Pool closes a Connection. + Emitted when a connection Pool closes a connection. :Parameters: - `event`: An instance of :class:`ConnectionClosedEvent`. """ raise NotImplementedError - def connection_check_out_started(self, event): + def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. Emitted when the driver starts attempting to check out a connection. @@ -312,7 +350,7 @@ def connection_check_out_started(self, event): """ raise NotImplementedError - def connection_check_out_failed(self, event): + def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. Emitted when the driver's attempt to check out a connection fails. @@ -322,20 +360,20 @@ def connection_check_out_failed(self, event): """ raise NotImplementedError - def connection_checked_out(self, event): + def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. 
- Emitted when the driver successfully checks out a Connection. + Emitted when the driver successfully checks out a connection. :Parameters: - `event`: An instance of :class:`ConnectionCheckedOutEvent`. """ raise NotImplementedError - def connection_checked_in(self, event): + def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckedInEvent`. - Emitted when the driver checks in a Connection back to the Connection + Emitted when the driver checks in a connection back to the connection Pool. :Parameters: @@ -353,7 +391,7 @@ class ServerHeartbeatListener(_EventListener): .. versionadded:: 3.3 """ - def started(self, event): + def started(self, event: ServerHeartbeatStartedEvent) -> None: """Abstract method to handle a `ServerHeartbeatStartedEvent`. :Parameters: @@ -361,7 +399,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: """Abstract method to handle a `ServerHeartbeatSucceededEvent`. :Parameters: @@ -369,7 +407,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: ServerHeartbeatFailedEvent) -> None: """Abstract method to handle a `ServerHeartbeatFailedEvent`. :Parameters: @@ -386,7 +424,7 @@ class TopologyListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: TopologyOpenedEvent) -> None: """Abstract method to handle a `TopologyOpenedEvent`. :Parameters: @@ -394,7 +432,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: """Abstract method to handle a `TopologyDescriptionChangedEvent`. :Parameters: @@ -402,7 +440,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: TopologyClosedEvent) -> None: """Abstract method to handle a `TopologyClosedEvent`. :Parameters: @@ -419,7 +457,7 @@ class ServerListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: ServerOpeningEvent) -> None: """Abstract method to handle a `ServerOpeningEvent`. :Parameters: @@ -427,7 +465,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: """Abstract method to handle a `ServerDescriptionChangedEvent`. :Parameters: @@ -435,7 +473,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: ServerClosedEvent) -> None: """Abstract method to handle a `ServerClosedEvent`. 
        :Parameters:
@@ -444,25 +482,29 @@ def closed(self, event):
        raise NotImplementedError


-def _to_micros(dur):
+def _to_micros(dur: timedelta) -> int:
     """Convert duration 'dur' to microseconds."""
     return int(dur.total_seconds() * 10e5)


-def _validate_event_listeners(option, listeners):
+def _validate_event_listeners(
+    option: str, listeners: Sequence[_EventListeners]
+) -> Sequence[_EventListeners]:
     """Validate event listeners"""
     if not isinstance(listeners, abc.Sequence):
-        raise TypeError("%s must be a list or tuple" % (option,))
+        raise TypeError(f"{option} must be a list or tuple")
     for listener in listeners:
         if not isinstance(listener, _EventListener):
-            raise TypeError("Listeners for %s must be either a "
-                            "CommandListener, ServerHeartbeatListener, "
-                            "ServerListener, TopologyListener, or "
-                            "ConnectionPoolListener." % (option,))
+            raise TypeError(
+                f"Listeners for {option} must be either a "
+                "CommandListener, ServerHeartbeatListener, "
+                "ServerListener, TopologyListener, or "
+                "ConnectionPoolListener."
+            )
     return listeners


-def register(listener):
+def register(listener: _EventListener) -> None:
     """Register a global event listener.

     :Parameters:
@@ -471,10 +513,12 @@ def register(listener):
       :class:`TopologyListener`, or :class:`ConnectionPoolListener`.
     """
     if not isinstance(listener, _EventListener):
-        raise TypeError("Listeners for %s must be either a "
-                        "CommandListener, ServerHeartbeatListener, "
-                        "ServerListener, TopologyListener, or "
-                        "ConnectionPoolListener." % (listener,))
+        raise TypeError(
+            f"Listeners for {listener} must be either a "
+            "CommandListener, ServerHeartbeatListener, "
+            "ServerListener, TopologyListener, or "
+            "ConnectionPoolListener."
+        )
     if isinstance(listener, CommandListener):
         _LISTENERS.command_listeners.append(listener)
     if isinstance(listener, ServerHeartbeatListener):
@@ -486,45 +530,91 @@ def register(listener):
     if isinstance(listener, ConnectionPoolListener):
         _LISTENERS.cmap_listeners.append(listener)

+
 # Note - to avoid bugs from forgetting which of these is all lowercase and
 # which are camelCase, and at the same time avoid having to add a test for
 # every command, use all lowercase here and test against command_name.lower().
-_SENSITIVE_COMMANDS = set(
-    ["authenticate", "saslstart", "saslcontinue", "getnonce", "createuser",
-     "updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"])
-
-
-class _CommandEvent(object):
+_SENSITIVE_COMMANDS: set = {
+    "authenticate",
+    "saslstart",
+    "saslcontinue",
+    "getnonce",
+    "createuser",
+    "updateuser",
+    "copydbgetnonce",
+    "copydbsaslstart",
+    "copydb",
+}
+
+
+# The "hello" command is also deemed sensitive when attempting speculative
+# authentication.
+def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): + return True + return False + + +class _CommandEvent: """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id") - - def __init__(self, command_name, request_id, connection_id, operation_id): + __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id", "__db") + + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: self.__cmd_name = command_name self.__rqst_id = request_id self.__conn_id = connection_id self.__op_id = operation_id + self.__service_id = service_id + self.__db = database_name @property - def command_name(self): + def command_name(self) -> str: """The command name.""" return self.__cmd_name @property - def request_id(self): + def request_id(self) -> int: """The request id for this operation.""" return self.__rqst_id @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this command was sent to.""" return self.__conn_id @property - def operation_id(self): + def service_id(self) -> Optional[ObjectId]: + """The service_id this command was sent to, or ``None``. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def operation_id(self) -> Optional[int]: """An id for this series of events or None.""" return self.__op_id + @property + def database_name(self) -> str: + """The database_name this command was sent to, or ``""``. + + .. versionadded:: 4.6 + """ + return self.__db + class CommandStartedEvent(_CommandEvent): """Event published when a command starts. @@ -536,30 +626,57 @@ class CommandStartedEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. """ - __slots__ = ("__cmd", "__db") - def __init__(self, command, database_name, *args): + __slots__ = ("__cmd",) + + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: if not command: - raise ValueError("%r is not a valid command" % (command,)) + raise ValueError(f"{command!r} is not a valid command") # Command name must be first key. 
command_name = next(iter(command)) - super(CommandStartedEvent, self).__init__(command_name, *args) - if command_name.lower() in _SENSITIVE_COMMANDS: - self.__cmd = {} + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + ) + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): + self.__cmd: _DocumentOut = {} else: self.__cmd = command - self.__db = database_name @property - def command(self): + def command(self) -> _DocumentOut: """The command document.""" return self.__cmd @property - def database_name(self): + def database_name(self) -> str: """The name of the database this command was run against.""" - return self.__db + return super().database_name + + def __repr__(self) -> str: + return ("<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}>").format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.service_id, + ) class CommandSucceededEvent(_CommandEvent): @@ -573,29 +690,61 @@ class CommandSucceededEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. + - `database_name`: The database this command was sent to, or ``""``. """ + __slots__ = ("__duration_micros", "__reply") - def __init__(self, duration, reply, command_name, - request_id, connection_id, operation_id): - super(CommandSucceededEvent, self).__init__( - command_name, request_id, connection_id, operation_id) + def __init__( + self, + duration: datetime.timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + ) self.__duration_micros = _to_micros(duration) - if command_name.lower() in _SENSITIVE_COMMANDS: - self.__reply = {} + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): + self.__reply: _DocumentOut = {} else: self.__reply = reply @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def reply(self): + def reply(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__reply + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.service_id, + ) + class CommandFailedEvent(_CommandEvent): """Event published when a command fails. @@ -608,41 +757,77 @@ class CommandFailedEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. + - `database_name`: The database this command was sent to, or ``""``. 
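The redaction rule implemented by `_SENSITIVE_COMMANDS` and `_is_speculative_authenticate` above can be seen by constructing events directly; a small synthetic sketch (real events are produced by the driver, not by hand):

```python
from pymongo.monitoring import CommandStartedEvent

addr = ("localhost", 27017)
find_evt = CommandStartedEvent({"find": "coll"}, "db", 1, addr, 1)
auth_evt = CommandStartedEvent({"saslStart": 1, "mechanism": "SCRAM-SHA-256"}, "admin", 2, addr, 2)
hello_evt = CommandStartedEvent({"hello": 1, "speculativeAuthenticate": {}}, "admin", 3, addr, 3)

assert find_evt.command == {"find": "coll"}  # ordinary commands pass through
assert auth_evt.command == {}                # sensitive command bodies are redacted
assert hello_evt.command == {}               # hello + speculativeAuthenticate too
```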
""" + __slots__ = ("__duration_micros", "__failure") - def __init__(self, duration, failure, *args): - super(CommandFailedEvent, self).__init__(*args) + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + ) self.__duration_micros = _to_micros(duration) self.__failure = failure @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def failure(self): + def failure(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__failure - -class _PoolEvent(object): + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + ) + + +class _PoolEvent: """Base class for pool events.""" + __slots__ = ("__address",) - def __init__(self, address): + def __init__(self, address: _Address) -> None: self.__address = address @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server the pool is attempting to connect to. """ return self.__address - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" class PoolCreatedEvent(_PoolEvent): @@ -654,21 +839,33 @@ class PoolCreatedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = ("__options",) - def __init__(self, address, options): - super(PoolCreatedEvent, self).__init__(address) + def __init__(self, address: _Address, options: dict[str, Any]) -> None: + super().__init__(address) self.__options = options @property - def options(self): - """Any non-default pool options that were set on this Connection Pool. - """ + def options(self) -> dict[str, Any]: + """Any non-default pool options that were set on this Connection Pool.""" return self.__options - def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.address, self.__options) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" + + +class PoolReadyEvent(_PoolEvent): + """Published when a Connection Pool is marked ready. + + :Parameters: + - `address`: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 4.0 + """ + + __slots__ = () class PoolClearedEvent(_PoolEvent): @@ -677,10 +874,29 @@ class PoolClearedEvent(_PoolEvent): :Parameters: - `address`: The address (host, port) pair of the server this Pool is attempting to connect to. + - `service_id`: The service_id this command was sent to, or ``None``. .. versionadded:: 3.9 """ - __slots__ = () + + __slots__ = ("__service_id",) + + def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: + super().__init__(address) + self.__service_id = service_id + + @property + def service_id(self) -> Optional[ObjectId]: + """Connections with this service_id are cleared. 
+ + When service_id is ``None``, all connections in the pool are cleared. + + .. versionadded:: 3.12 + """ + return self.__service_id + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r})" class PoolClosedEvent(_PoolEvent): @@ -692,75 +908,88 @@ class PoolClosedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = () -class ConnectionClosedReason(object): +class ConnectionClosedReason: """An enum that defines values for `reason` on a :class:`ConnectionClosedEvent`. .. versionadded:: 3.9 """ - STALE = 'stale' + STALE = "stale" """The pool was cleared, making the connection no longer valid.""" - IDLE = 'idle' + IDLE = "idle" """The connection became stale by being idle for too long (maxIdleTimeMS). """ - ERROR = 'error' + ERROR = "error" """The connection experienced an error, making it no longer valid.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was closed, making the connection no longer valid.""" -class ConnectionCheckOutFailedReason(object): +class ConnectionCheckOutFailedReason: """An enum that defines values for `reason` on a :class:`ConnectionCheckOutFailedEvent`. .. versionadded:: 3.9 """ - TIMEOUT = 'timeout' + TIMEOUT = "timeout" """The connection check out attempt exceeded the specified timeout.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was previously closed, and cannot provide new connections.""" - CONN_ERROR = 'connectionError' + CONN_ERROR = "connectionError" """The connection check out attempt experienced an error while setting up a new connection. """ -class _ConnectionEvent(object): - """Private base class for some connection events.""" - __slots__ = ("__address", "__connection_id") +class _ConnectionEvent: + """Private base class for connection events.""" - def __init__(self, address, connection_id): + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: self.__address = address - self.__connection_id = connection_id @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server this connection is attempting to connect to. """ return self.__address + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + @property - def connection_id(self): - """The ID of the Connection.""" + def connection_id(self) -> int: + """The ID of the connection.""" return self.__connection_id - def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__connection_id) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" -class ConnectionCreatedEvent(_ConnectionEvent): +class ConnectionCreatedEvent(_ConnectionIdEvent): """Published when a Connection Pool creates a Connection object. NOTE: This connection is not ready for use until the @@ -773,10 +1002,11 @@ class ConnectionCreatedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () -class ConnectionReadyEvent(_ConnectionEvent): +class ConnectionReadyEvent(_ConnectionIdEvent): """Published when a Connection has finished its setup, and is ready to use. :Parameters: @@ -786,10 +1016,11 @@ class ConnectionReadyEvent(_ConnectionEvent): .. 
versionadded:: 3.9 """ + __slots__ = () -class ConnectionClosedEvent(_ConnectionEvent): +class ConnectionClosedEvent(_ConnectionIdEvent): """Published when a Connection is closed. :Parameters: @@ -800,14 +1031,15 @@ class ConnectionClosedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = ("__reason",) - def __init__(self, address, connection_id, reason): - super(ConnectionClosedEvent, self).__init__(address, connection_id) + def __init__(self, address: _Address, connection_id: int, reason: str): + super().__init__(address, connection_id) self.__reason = reason @property - def reason(self): + def reason(self) -> str: """A reason explaining why this connection was closed. The reason must be one of the strings from the @@ -815,13 +1047,16 @@ def reason(self): """ return self.__reason - def __repr__(self): - return '%s(%r, %r, %r)' % ( - self.__class__.__name__, self.address, self.connection_id, - self.__reason) + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r})".format( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) -class ConnectionCheckOutStartedEvent(object): +class ConnectionCheckOutStartedEvent(_ConnectionEvent): """Published when the driver starts attempting to check out a connection. :Parameters: @@ -830,23 +1065,11 @@ class ConnectionCheckOutStartedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address",) - def __init__(self, address): - self.__address = address - - @property - def address(self): - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + __slots__ = () -class ConnectionCheckOutFailedEvent(object): +class ConnectionCheckOutFailedEvent(_ConnectionEvent): """Published when the driver's attempt to check out a connection fails. :Parameters: @@ -856,21 +1079,15 @@ class ConnectionCheckOutFailedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address", "__reason") - def __init__(self, address, reason): - self.__address = address - self.__reason = reason + __slots__ = ("__reason",) - @property - def address(self): - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address + def __init__(self, address: _Address, reason: str) -> None: + super().__init__(address) + self.__reason = reason @property - def reason(self): + def reason(self) -> str: """A reason explaining why connection check out failed. The reason must be one of the strings from the @@ -878,13 +1095,12 @@ def reason(self): """ return self.__reason - def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__reason) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r})" -class ConnectionCheckedOutEvent(_ConnectionEvent): - """Published when the driver successfully checks out a Connection. +class ConnectionCheckedOutEvent(_ConnectionIdEvent): + """Published when the driver successfully checks out a connection. :Parameters: - `address`: The address (host, port) pair of the server this @@ -893,10 +1109,11 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () -class ConnectionCheckedInEvent(_ConnectionEvent): +class ConnectionCheckedInEvent(_ConnectionIdEvent): """Published when the driver checks in a Connection into the Pool. 
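A usage sketch for the CMAP events above (`PoolLogger` is our own name; the remaining `ConnectionPoolListener` callbacks are omitted for brevity, which is safe because the publish methods later in this module swallow listener exceptions):

```python
from pymongo import monitoring


class PoolLogger(monitoring.ConnectionPoolListener):
    def connection_checked_out(self, event):
        print(f"connection {event.connection_id} checked out of pool {event.address}")

    def connection_checked_in(self, event):
        print(f"connection {event.connection_id} checked back in to {event.address}")

    def connection_closed(self, event):
        print(f"connection {event.connection_id} closed: {event.reason}")

    def pool_cleared(self, event):
        # service_id is only set when connected through a load balancer.
        print(f"pool {event.address} cleared (service_id={event.service_id})")


monitoring.register(PoolLogger())
```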
:Parameters: @@ -906,28 +1123,32 @@ class ConnectionCheckedInEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () -class _ServerEvent(object): +class _ServerEvent: """Base class for server events.""" __slots__ = ("__server_address", "__topology_id") - def __init__(self, server_address, topology_id): + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: self.__server_address = server_address self.__topology_id = topology_id @property - def server_address(self): + def server_address(self) -> _Address: """The address (host, port) pair of the server""" return self.__server_address @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" + class ServerDescriptionChangedEvent(_ServerEvent): """Published when server description changes. @@ -935,25 +1156,40 @@ class ServerDescriptionChangedEvent(_ServerEvent): .. versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description, new_description, *args): - super(ServerDescriptionChangedEvent, self).__init__(*args) + def __init__( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + *args: Any, + ) -> None: + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> ServerDescription: """The previous - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__previous_description @property - def new_description(self): + def new_description(self) -> ServerDescription: """The new - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__new_description + def __repr__(self) -> str: + return "<{} {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) + class ServerOpeningEvent(_ServerEvent): """Published when server is initialized. @@ -973,19 +1209,22 @@ class ServerClosedEvent(_ServerEvent): __slots__ = () -class TopologyEvent(object): +class TopologyEvent: """Base class for topology description events.""" - __slots__ = ('__topology_id') + __slots__ = ("__topology_id",) - def __init__(self, topology_id): + def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id + def __repr__(self) -> str: + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" + class TopologyDescriptionChangedEvent(TopologyEvent): """Published when the topology description changes. @@ -993,25 +1232,40 @@ class TopologyDescriptionChangedEvent(TopologyEvent): .. 
versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description, new_description, *args): - super(TopologyDescriptionChangedEvent, self).__init__(*args) + def __init__( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + *args: Any, + ) -> None: + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> TopologyDescription: """The previous - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__previous_description @property - def new_description(self): + def new_description(self) -> TopologyDescription: """The new - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__new_description + def __repr__(self) -> str: + return "<{} topology_id: {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) + class TopologyOpenedEvent(TopologyEvent): """Published when the topology is initialized. @@ -1031,20 +1285,33 @@ class TopologyClosedEvent(TopologyEvent): __slots__ = () -class _ServerHeartbeatEvent(object): +class _ServerHeartbeatEvent: """Base class for server heartbeat events.""" - __slots__ = ('__connection_id') + __slots__ = ("__connection_id", "__awaited") - def __init__(self, connection_id): + def __init__(self, connection_id: _Address, awaited: bool = False) -> None: self.__connection_id = connection_id + self.__awaited = awaited @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent - to.""" + to. + """ return self.__connection_id + @property + def awaited(self) -> bool: + """Whether the heartbeat was issued as an awaitable hello command. + + .. versionadded:: 4.6 + """ + return self.__awaited + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" + class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): """Published when a heartbeat is started. @@ -1061,23 +1328,46 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply') + __slots__ = ("__duration", "__reply") - def __init__(self, duration, reply, *args): - super(ServerHeartbeatSucceededEvent, self).__init__(*args) + def __init__( + self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) self.__duration = duration self.__reply = reply @property - def duration(self): + def duration(self) -> float: """The duration of this heartbeat in microseconds.""" return self.__duration @property - def reply(self): - """An instance of :class:`~pymongo.ismaster.IsMaster`.""" + def reply(self) -> Hello: + """An instance of :class:`~pymongo.hello.Hello`.""" return self.__reply + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. 
versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): """Fired when the server heartbeat fails, either with an "ok: 0" @@ -1086,25 +1376,48 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply') + __slots__ = ("__duration", "__reply") - def __init__(self, duration, reply, *args): - super(ServerHeartbeatFailedEvent, self).__init__(*args) + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) self.__duration = duration self.__reply = reply @property - def duration(self): + def duration(self) -> float: """The duration of this heartbeat in microseconds.""" return self.__duration @property - def reply(self): + def reply(self) -> Exception: """A subclass of :exc:`Exception`.""" return self.__reply + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + -class _EventListeners(object): +class _EventListeners: """Configure event listeners for a client instance. Any event listeners registered globally are included by default. @@ -1112,7 +1425,8 @@ class _EventListeners(object): :Parameters: - `listeners`: A list of event listeners. 
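A minimal heartbeat-listener sketch using the `awaited` and `duration` attributes introduced above (`HeartbeatLogger` is our own name):

```python
from pymongo import monitoring


class HeartbeatLogger(monitoring.ServerHeartbeatListener):
    def started(self, event):
        print(f"heartbeat to {event.connection_id} started (awaited={event.awaited})")

    def succeeded(self, event):
        # For awaited (streaming) heartbeats, duration includes the time the
        # server waited before replying, so it is not a pure round trip.
        print(f"heartbeat to {event.connection_id} took {event.duration}")

    def failed(self, event):
        print(f"heartbeat to {event.connection_id} failed: {event.reply!r}")


monitoring.register(HeartbeatLogger())
```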
""" - def __init__(self, listeners): + + def __init__(self, listeners: Optional[Sequence[_EventListener]]): self.__command_listeners = _LISTENERS.command_listeners[:] self.__server_listeners = _LISTENERS.server_listeners[:] lst = _LISTENERS.server_heartbeat_listeners @@ -1133,45 +1447,54 @@ def __init__(self, listeners): self.__cmap_listeners.append(lst) self.__enabled_for_commands = bool(self.__command_listeners) self.__enabled_for_server = bool(self.__server_listeners) - self.__enabled_for_server_heartbeat = bool( - self.__server_heartbeat_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) self.__enabled_for_topology = bool(self.__topology_listeners) self.__enabled_for_cmap = bool(self.__cmap_listeners) @property - def enabled_for_commands(self): + def enabled_for_commands(self) -> bool: """Are any CommandListener instances registered?""" return self.__enabled_for_commands @property - def enabled_for_server(self): + def enabled_for_server(self) -> bool: """Are any ServerListener instances registered?""" return self.__enabled_for_server @property - def enabled_for_server_heartbeat(self): + def enabled_for_server_heartbeat(self) -> bool: """Are any ServerHeartbeatListener instances registered?""" return self.__enabled_for_server_heartbeat @property - def enabled_for_topology(self): + def enabled_for_topology(self) -> bool: """Are any TopologyListener instances registered?""" return self.__enabled_for_topology @property - def enabled_for_cmap(self): + def enabled_for_cmap(self) -> bool: """Are any ConnectionPoolListener instances registered?""" return self.__enabled_for_cmap - def event_listeners(self): + def event_listeners(self) -> list[_EventListeners]: """List of registered event listeners.""" - return (self.__command_listeners[:], - self.__server_heartbeat_listeners[:], - self.__server_listeners[:], - self.__topology_listeners[:]) - - def publish_command_start(self, command, database_name, - request_id, connection_id, op_id=None): + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + ) -> None: """Publish a CommandStartedEvent to all command listeners. :Parameters: @@ -1182,19 +1505,31 @@ def publish_command_start(self, command, database_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. """ if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id) + command, database_name, request_id, connection_id, op_id, service_id=service_id + ) for subscriber in self.__command_listeners: try: subscriber.started(event) except Exception: _handle_exception() - def publish_command_success(self, duration, reply, command_name, - request_id, connection_id, op_id=None): + def publish_command_success( + self, + duration: timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + speculative_hello: bool = False, + database_name: str = "", + ) -> None: """Publish a CommandSucceededEvent to all command listeners. 
:Parameters: @@ -1205,19 +1540,43 @@ def publish_command_success(self, duration, reply, command_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. + - `speculative_hello`: Was the command sent with speculative auth? + - `database_name`: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id + if speculative_hello: + # Redact entire response when the command started contained + # speculativeAuthenticate. + reply = {} event = CommandSucceededEvent( - duration, reply, command_name, request_id, connection_id, op_id) + duration, + reply, + command_name, + request_id, + connection_id, + op_id, + service_id, + database_name=database_name, + ) for subscriber in self.__command_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_command_failure(self, duration, failure, command_name, - request_id, connection_id, op_id=None): + def publish_command_failure( + self, + duration: timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: """Publish a CommandFailedEvent to all command listeners. :Parameters: @@ -1229,33 +1588,45 @@ def publish_command_failure(self, duration, failure, command_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. + - `database_name`: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id event = CommandFailedEvent( - duration, failure, command_name, request_id, connection_id, op_id) + duration, + failure, + command_name, + request_id, + connection_id, + op_id, + service_id=service_id, + database_name=database_name, + ) for subscriber in self.__command_listeners: try: subscriber.failed(event) except Exception: _handle_exception() - def publish_server_heartbeat_started(self, connection_id): + def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: """Publish a ServerHeartbeatStartedEvent to all server heartbeat listeners. :Parameters: - `connection_id`: The address (host, port) pair of the connection. + - `awaited`: True if this heartbeat is part of an awaitable hello command. """ - event = ServerHeartbeatStartedEvent(connection_id) + event = ServerHeartbeatStartedEvent(connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.started(event) except Exception: _handle_exception() - def publish_server_heartbeat_succeeded(self, connection_id, duration, - reply): + def publish_server_heartbeat_succeeded( + self, connection_id: _Address, duration: float, reply: Hello, awaited: bool + ) -> None: """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. @@ -1264,15 +1635,18 @@ def publish_server_heartbeat_succeeded(self, connection_id, duration, - `duration`: The execution time of the event in the highest possible resolution for the platform. - `reply`: The command reply. - """ - event = ServerHeartbeatSucceededEvent(duration, reply, connection_id) + - `awaited`: True if the response was awaited. 
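Note the publish pattern above: each callback runs inside try/except and errors are routed to `_handle_exception`, so a buggy listener cannot fail an operation. A sketch demonstrating this (assumes a server on localhost:27017):

```python
from pymongo import MongoClient, monitoring


class BrokenListener(monitoring.CommandListener):
    def started(self, event):
        raise RuntimeError("bug in user code")

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass


client = MongoClient(event_listeners=[BrokenListener()])
client.admin.command("ping")  # still succeeds; the listener error is only logged
```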
+ """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_server_heartbeat_failed(self, connection_id, duration, reply): + def publish_server_heartbeat_failed( + self, connection_id: _Address, duration: float, reply: Exception, awaited: bool + ) -> None: """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. @@ -1281,15 +1655,16 @@ def publish_server_heartbeat_failed(self, connection_id, duration, reply): - `duration`: The execution time of the event in the highest possible resolution for the platform. - `reply`: The command reply. - """ - event = ServerHeartbeatFailedEvent(duration, reply, connection_id) + - `awaited`: True if the response was awaited. + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.failed(event) except Exception: _handle_exception() - def publish_server_opened(self, server_address, topology_id): + def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerOpeningEvent to all server listeners. :Parameters: @@ -1304,7 +1679,7 @@ def publish_server_opened(self, server_address, topology_id): except Exception: _handle_exception() - def publish_server_closed(self, server_address, topology_id): + def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerClosedEvent to all server listeners. :Parameters: @@ -1319,9 +1694,13 @@ def publish_server_closed(self, server_address, topology_id): except Exception: _handle_exception() - def publish_server_description_changed(self, previous_description, - new_description, server_address, - topology_id): + def publish_server_description_changed( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + server_address: _Address, + topology_id: ObjectId, + ) -> None: """Publish a ServerDescriptionChangedEvent to all server listeners. :Parameters: @@ -1331,16 +1710,16 @@ def publish_server_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. """ - event = ServerDescriptionChangedEvent(previous_description, - new_description, server_address, - topology_id) + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) for subscriber in self.__server_listeners: try: subscriber.description_changed(event) except Exception: _handle_exception() - def publish_topology_opened(self, topology_id): + def publish_topology_opened(self, topology_id: ObjectId) -> None: """Publish a TopologyOpenedEvent to all topology listeners. :Parameters: @@ -1354,7 +1733,7 @@ def publish_topology_opened(self, topology_id): except Exception: _handle_exception() - def publish_topology_closed(self, topology_id): + def publish_topology_closed(self, topology_id: ObjectId) -> None: """Publish a TopologyClosedEvent to all topology listeners. 
:Parameters: @@ -1368,8 +1747,12 @@ def publish_topology_closed(self, topology_id): except Exception: _handle_exception() - def publish_topology_description_changed(self, previous_description, - new_description, topology_id): + def publish_topology_description_changed( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + topology_id: ObjectId, + ) -> None: """Publish a TopologyDescriptionChangedEvent to all topology listeners. :Parameters: @@ -1378,17 +1761,15 @@ def publish_topology_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. """ - event = TopologyDescriptionChangedEvent(previous_description, - new_description, topology_id) + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) for subscriber in self.__topology_listeners: try: subscriber.description_changed(event) except Exception: _handle_exception() - def publish_pool_created(self, address, options): - """Publish a :class:`PoolCreatedEvent` to all pool listeners. - """ + def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" event = PoolCreatedEvent(address, options) for subscriber in self.__cmap_listeners: try: @@ -1396,19 +1777,26 @@ def publish_pool_created(self, address, options): except Exception: _handle_exception() - def publish_pool_cleared(self, address): - """Publish a :class:`PoolClearedEvent` to all pool listeners. - """ - event = PoolClearedEvent(address) + def publish_pool_ready(self, address: _Address) -> None: + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" + event = PoolReadyEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_ready(event) + except Exception: + _handle_exception() + + def publish_pool_cleared(self, address: _Address, service_id: Optional[ObjectId]) -> None: + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" + event = PoolClearedEvent(address, service_id) for subscriber in self.__cmap_listeners: try: subscriber.pool_cleared(event) except Exception: _handle_exception() - def publish_pool_closed(self, address): - """Publish a :class:`PoolClosedEvent` to all pool listeners. - """ + def publish_pool_closed(self, address: _Address) -> None: + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" event = PoolClosedEvent(address) for subscriber in self.__cmap_listeners: try: @@ -1416,7 +1804,7 @@ def publish_pool_closed(self, address): except Exception: _handle_exception() - def publish_connection_created(self, address, connection_id): + def publish_connection_created(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCreatedEvent` to all connection listeners. """ @@ -1427,9 +1815,8 @@ def publish_connection_created(self, address, connection_id): except Exception: _handle_exception() - def publish_connection_ready(self, address, connection_id): - """Publish a :class:`ConnectionReadyEvent` to all connection listeners. 
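For completeness, a sketch of a `TopologyListener` consuming the topology events published here (`TopologyLogger` is our own name; `topology_type_name` is the readable type on `TopologyDescription`):

```python
from pymongo import monitoring


class TopologyLogger(monitoring.TopologyListener):
    def opened(self, event):
        print(f"topology {event.topology_id} opened")

    def description_changed(self, event):
        print(
            f"topology {event.topology_id}: "
            f"{event.previous_description.topology_type_name} -> "
            f"{event.new_description.topology_type_name}"
        )

    def closed(self, event):
        print(f"topology {event.topology_id} closed")


monitoring.register(TopologyLogger())
```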
- """ + def publish_connection_ready(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" event = ConnectionReadyEvent(address, connection_id) for subscriber in self.__cmap_listeners: try: @@ -1437,7 +1824,7 @@ def publish_connection_ready(self, address, connection_id): except Exception: _handle_exception() - def publish_connection_closed(self, address, connection_id, reason): + def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: """Publish a :class:`ConnectionClosedEvent` to all connection listeners. """ @@ -1448,7 +1835,7 @@ def publish_connection_closed(self, address, connection_id, reason): except Exception: _handle_exception() - def publish_connection_check_out_started(self, address): + def publish_connection_check_out_started(self, address: _Address) -> None: """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection listeners. """ @@ -1459,18 +1846,18 @@ def publish_connection_check_out_started(self, address): except Exception: _handle_exception() - def publish_connection_check_out_failed(self, address, reason): + def publish_connection_check_out_failed(self, address: _Address, reason: str) -> None: """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection listeners. """ event = ConnectionCheckOutFailedEvent(address, reason) for subscriber in self.__cmap_listeners: try: - subscriber.connection_check_out_started(event) + subscriber.connection_check_out_failed(event) except Exception: _handle_exception() - def publish_connection_checked_out(self, address, connection_id): + def publish_connection_checked_out(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCheckedOutEvent` to all connection listeners. """ @@ -1481,7 +1868,7 @@ def publish_connection_checked_out(self, address, connection_id): except Exception: _handle_exception() - def publish_connection_checked_in(self, address, connection_id): + def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCheckedInEvent` to all connection listeners. """ diff --git a/pymongo/monotonic.py b/pymongo/monotonic.py deleted file mode 100644 index 3be25b8b17..0000000000 --- a/pymongo/monotonic.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Time. Monotonic if possible. -""" - -from __future__ import absolute_import - -__all__ = ['time'] - -try: - # Patches standard time module. - # From https://pypi.python.org/pypi/Monotime. - import monotime -except ImportError: - pass - -try: - # From https://pypi.python.org/pypi/monotonic. - from monotonic import monotonic as time -except ImportError: - try: - # Monotime or Python 3. - from time import monotonic as time - except ImportError: - # Not monotonic. 
- from time import time diff --git a/pymongo/network.py b/pymongo/network.py index 0996180f5f..fb4388121e 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -13,61 +13,83 @@ # limitations under the License. """Internal network layer helper methods.""" +from __future__ import annotations import datetime import errno -import select +import socket import struct -import threading - -_HAS_POLL = True -_EVENT_MASK = 0 -try: - from select import poll - _EVENT_MASK = ( - select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP) -except ImportError: - _HAS_POLL = False - -try: - from select import error as _SELECT_ERROR -except ImportError: - _SELECT_ERROR = OSError +import time +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) from bson import _decode_all_selective -from bson.py3compat import PY3 - -from pymongo import helpers, message +from pymongo import _csot, helpers, message, ssl_support from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import decompress, _NO_COMPRESSION -from pymongo.errors import (AutoReconnect, - NotMasterError, - OperationFailure, - ProtocolError) -from pymongo.message import _UNPACK_REPLY - +from pymongo.compression_support import _NO_COMPRESSION, decompress +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, + ProtocolError, + _OperationCancelled, +) +from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.monitoring import _is_speculative_authenticate +from pymongo.socket_checker import _errno_from_exception + +if TYPE_CHECKING: + from bson import CodecOptions + from pymongo.client_session import ClientSession + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.mongo_client import MongoClient + from pymongo.monitoring import _EventListeners + from pymongo.pool import Connection + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.write_concern import WriteConcern _UNPACK_HEADER = struct.Struct(" _DocumentType: """Execute a command over the socket, or raise socket.error. :Parameters: - - `sock`: a raw socket instance + - `conn`: a Connection instance - `dbname`: name of the database on which to run the command - `spec`: a command document as an ordered dict type, eg SON. - - `slave_ok`: whether to set the SlaveOkay wire protocol bit - `is_mongos`: are we connected to a mongos? - `read_preference`: a read preference - `codec_options`: a CodecOptions instance @@ -75,8 +97,7 @@ def command(sock, dbname, spec, slave_ok, is_mongos, - `client`: optional MongoClient instance for updating $clusterTime. - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - - `address`: the (host, port) of `sock` - - `check_keys`: if True, check `spec` for invalid keys + - `address`: the (host, port) of `conn` - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners` - `max_bson_size`: The maximum encoded bson size for this server - `read_concern`: The read concern for this command. @@ -89,232 +110,243 @@ def command(sock, dbname, spec, slave_ok, is_mongos, - `user_fields` (optional): Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. + - `exhaust_allowed`: True if we should enable OP_MSG exhaustAllowed. 
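An aside on the header parsing used below: the struct format strings are truncated in this hunk as rendered, but the MongoDB wire-protocol header (MsgHeader) is four little-endian int32s, messageLength, requestID, responseTo, and opCode, so `_UNPACK_HEADER` is presumably built from `"<iiii"`. A standalone sketch under that assumption:

```python
import struct

# MsgHeader: messageLength, requestID, responseTo, opCode (all little-endian int32).
UNPACK_HEADER = struct.Struct("<iiii").unpack

header = struct.pack("<iiii", 26, 42, 0, 2013)  # opCode 2013 is OP_MSG
length, request_id, response_to, op_code = UNPACK_HEADER(header)
assert (length, request_id, response_to, op_code) == (26, 42, 0, 2013)
# receive_message() checks response_to against the request id it sent, and
# opCode 2012 (OP_COMPRESSED) routes the body through the decompression path.
```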
""" name = next(iter(spec)) - ns = dbname + '.$cmd' - flags = 4 if slave_ok else 0 + ns = dbname + ".$cmd" + speculative_hello = False # Publish the original command document, perhaps with lsid and $clusterTime. orig = spec if is_mongos and not use_op_msg: + assert read_preference is not None spec = message._maybe_add_read_preference(spec, read_preference) if read_concern and not (session and session.in_transaction): if read_concern.level: - spec['readConcern'] = read_concern.document - if (session and session.options.causal_consistency - and session.operation_time is not None): - spec.setdefault( - 'readConcern', {})['afterClusterTime'] = session.operation_time + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) if collation is not None: - spec['collation'] = collation + spec["collation"] = collation publish = listeners is not None and listeners.enabled_for_commands if publish: start = datetime.datetime.now() + speculative_hello = _is_speculative_authenticate(name, spec) if compression_ctx and name.lower() in _NO_COMPRESSION: compression_ctx = None - if (client and client._encrypter and - not client._encrypter._bypass_auto_encryption): - spec = orig = client._encrypter.encrypt( - dbname, spec, check_keys, codec_options) - # We already checked the keys, no need to do it again. - check_keys = False + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) if use_op_msg: - flags = 2 if unacknowledged else 0 + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 request_id, msg, size, max_doc_size = message._op_msg( - flags, spec, dbname, read_preference, slave_ok, check_keys, - codec_options, ctx=compression_ctx) + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) # If this is an unacknowledged write then make sure the encoded doc(s) # are small enough, otherwise rely on the server to return an error. - if (unacknowledged and max_bson_size is not None and - max_doc_size > max_bson_size): + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: message._raise_document_too_large(name, size, max_bson_size) else: - request_id, msg, size = message.query( - flags, ns, 0, -1, spec, None, codec_options, check_keys, - compression_ctx) + request_id, msg, size = message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) - if (max_bson_size is not None - and size > max_bson_size + message._COMMAND_OVERHEAD): - message._raise_document_too_large( - name, size, max_bson_size + message._COMMAND_OVERHEAD) + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) if publish: encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start(orig, dbname, request_id, address) + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, dbname, request_id, address, service_id=conn.service_id + ) start = datetime.datetime.now() try: - sock.sendall(msg) + conn.conn.sendall(msg) if use_op_msg and unacknowledged: # Unacknowledged, fake a successful command response. 
reply = None - response_doc = {"ok": 1} + response_doc: _DocumentOut = {"ok": 1} else: - reply = receive_message(sock, request_id) + reply = receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response( - codec_options=codec_options, user_fields=user_fields) + codec_options=codec_options, user_fields=user_fields + ) response_doc = unpacked_docs[0] if client: client._process_response(response_doc, session) if check: helpers._check_command_response( - response_doc, None, allowable_errors, - parse_write_concern_error=parse_write_concern_error) + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotMasterError, OperationFailure)): - failure = exc.details + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] else: failure = message._convert_exception(exc) + assert listeners is not None + assert address is not None listeners.publish_command_failure( - duration, failure, name, request_id, address) + duration, + failure, + name, + request_id, + address, + service_id=conn.service_id, + database_name=dbname, + ) raise if publish: duration = (datetime.datetime.now() - start) + encoding_duration + assert listeners is not None + assert address is not None listeners.publish_command_success( - duration, response_doc, name, request_id, address) + duration, + response_doc, + name, + request_id, + address, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) if client and client._encrypter and reply: decrypted = client._encrypter.decrypt(reply.raw_command_response()) - response_doc = _decode_all_selective(decrypted, codec_options, - user_fields)[0] + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] - return response_doc _UNPACK_COMPRESSION_HEADER = struct.Struct(" Union[_OpReply, _OpMsg]: """Receive a raw BSON message or raise socket.error.""" + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + timeout = conn.conn.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None # Ignore the response's request id. - length, _, response_to, op_code = _UNPACK_HEADER( - _receive_data_on_socket(sock, 16)) + length, _, response_to, op_code = _UNPACK_HEADER(_receive_data_on_socket(conn, 16, deadline)) # No request_id for exhaust cursor "getMore". 
if request_id is not None: if request_id != response_to: - raise ProtocolError("Got response id %r but expected " - "%r" % (response_to, request_id)) + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") if length <= 16: - raise ProtocolError("Message length (%r) not longer than standard " - "message header size (16)" % (length,)) + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) if length > max_message_size: - raise ProtocolError("Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size)) + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({max_message_size!r})" + ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - _receive_data_on_socket(sock, 9)) - data = decompress( - _receive_data_on_socket(sock, length - 25), compressor_id) + _receive_data_on_socket(conn, 9, deadline) + ) + data = decompress(_receive_data_on_socket(conn, length - 25, deadline), compressor_id) else: - data = _receive_data_on_socket(sock, length - 16) + data = _receive_data_on_socket(conn, length - 16, deadline) try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected " - "%r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None return unpack_reply(data) -# memoryview was introduced in Python 2.7 but we only use it on Python 3 -# because before 2.7.4 the struct module did not support memoryview: -# https://bugs.python.org/issue10212. -# In Jython, using slice assignment on a memoryview results in a -# NullPointerException. -if not PY3: - def _receive_data_on_socket(sock, length): - buf = bytearray(length) - i = 0 - while length: - try: - chunk = sock.recv(length) - except (IOError, OSError) as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk == b"": - raise AutoReconnect("connection closed") - - buf[i:i + len(chunk)] = chunk - i += len(chunk) - length -= len(chunk) - - return bytes(buf) -else: - def _receive_data_on_socket(sock, length): - buf = bytearray(length) - mv = memoryview(buf) - bytes_read = 0 - while bytes_read < length: - try: - chunk_length = sock.recv_into(mv[bytes_read:]) - except (IOError, OSError) as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk_length == 0: - raise AutoReconnect("connection closed") - - bytes_read += chunk_length - - return mv - - -def _errno_from_exception(exc): - if hasattr(exc, 'errno'): - return exc.errno - elif exc.args: - return exc.args[0] - else: - return None - - -class SocketChecker(object): +_POLL_TIMEOUT = 0.5 - def __init__(self): - if _HAS_POLL: - self._lock = threading.Lock() - self._poller = poll() - else: - self._lock = None - self._poller = None - def socket_closed(self, sock): - """Return True if we know socket has been closed, False otherwise. - """ +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + context = conn.cancel_context + # Only Monitor connections can be cancelled. 
+ if context: + sock = conn.conn + timed_out = False while True: - try: - if self._poller: - with self._lock: - self._poller.register(sock, _EVENT_MASK) - try: - rd = self._poller.poll(0) - finally: - self._poller.unregister(sock) + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) else: - rd, _, _ = select.select([sock], [], [], 0) - except (RuntimeError, KeyError): - # RuntimeError is raised during a concurrent poll. KeyError - # is raised by unregister if the socket is not in the poller. - # These errors should not be possible since we protect the - # poller with a mutex. - raise - except ValueError: - # ValueError is raised by register/unregister/select if the - # socket file descriptor is negative or outside the range for - # select (> 1023). - return True - except (_SELECT_ERROR, IOError) as exc: - if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): - continue - return True - except Exception: - # Any other exceptions should be attributed to a closed - # or invalid socket. - return True - return len(rd) > 0 + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if context.cancelled: + raise _OperationCancelled("hello cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + + +# Errors raised by sockets (and TLS sockets) when in non-blocking mode. +BLOCKING_IO_ERRORS = (BlockingIOError, *ssl_support.BLOCKING_IO_ERRORS) + + +def _receive_data_on_socket(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + while bytes_read < length: + try: + wait_for_read(conn, deadline) + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") from None + except OSError as exc: + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + + return mv diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py new file mode 100644 index 0000000000..742579312f --- /dev/null +++ b/pymongo/ocsp_cache.py @@ -0,0 +1,108 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for caching OCSP responses.""" + +from __future__ import annotations + +from collections import namedtuple +from datetime import datetime as _datetime +from datetime import timezone +from typing import TYPE_CHECKING, Any + +from pymongo.lock import _create_lock + +if TYPE_CHECKING: + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + + +class _OCSPCache: + """A cache for OCSP responses.""" + + CACHE_KEY_TYPE = namedtuple( # type: ignore + "OcspResponseCacheKey", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) + + def __init__(self) -> None: + self._data: dict[Any, OCSPResponse] = {} + # Hold this lock when accessing _data. + self._lock = _create_lock() + + def _get_cache_key(self, ocsp_request: OCSPRequest) -> CACHE_KEY_TYPE: + return self.CACHE_KEY_TYPE( + hash_algorithm=ocsp_request.hash_algorithm.name.lower(), + issuer_name_hash=ocsp_request.issuer_name_hash, + issuer_key_hash=ocsp_request.issuer_key_hash, + serial_number=ocsp_request.serial_number, + ) + + def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: + """Add/update a cache entry. + + 'key' is of type cryptography.x509.ocsp.OCSPRequest + 'value' is of type cryptography.x509.ocsp.OCSPResponse + + Validity of the OCSP response must be checked by caller. + """ + with self._lock: + cache_key = self._get_cache_key(key) + + # As per the OCSP protocol, if the response's nextUpdate field is + # not set, the responder is indicating that newer revocation + # information is available all the time. + if value.next_update is None: + self._data.pop(cache_key, None) + return + + # Do nothing if the response is invalid. + if not ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): + return + + # Cache new response OR update cached response if new response + # has longer validity. + cached_value = self._data.get(cache_key, None) + if cached_value is None or ( + cached_value.next_update is not None + and cached_value.next_update < value.next_update + ): + self._data[cache_key] = value + + def __getitem__(self, item: OCSPRequest) -> OCSPResponse: + """Get a cache entry if it exists. + + 'item' is of type cryptography.x509.ocsp.OCSPRequest + + Raises KeyError if the item is not in the cache. + """ + with self._lock: + cache_key = self._get_cache_key(item) + value = self._data[cache_key] + + # Return cached response if it is still valid. + assert value.this_update is not None + assert value.next_update is not None + if ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): + return value + + self._data.pop(cache_key, None) + raise KeyError(cache_key) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py new file mode 100644 index 0000000000..1bda3b4d71 --- /dev/null +++ b/pymongo/ocsp_support.py @@ -0,0 +1,432 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for requesting and verifying OCSP responses.""" +from __future__ import annotations + +import logging as _logging +import re as _re +from datetime import datetime as _datetime +from datetime import timezone +from typing import TYPE_CHECKING, Iterable, Optional, Type, Union + +from cryptography.exceptions import InvalidSignature as _InvalidSignature +from cryptography.hazmat.backends import default_backend as _default_backend +from cryptography.hazmat.primitives.asymmetric.dsa import DSAPublicKey as _DSAPublicKey +from cryptography.hazmat.primitives.asymmetric.ec import ECDSA as _ECDSA +from cryptography.hazmat.primitives.asymmetric.ec import ( + EllipticCurvePublicKey as _EllipticCurvePublicKey, +) +from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 as _PKCS1v15 +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey as _RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.x448 import ( + X448PublicKey as _X448PublicKey, +) +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PublicKey as _X25519PublicKey, +) +from cryptography.hazmat.primitives.hashes import SHA1 as _SHA1 +from cryptography.hazmat.primitives.hashes import Hash as _Hash +from cryptography.hazmat.primitives.serialization import Encoding as _Encoding +from cryptography.hazmat.primitives.serialization import PublicFormat as _PublicFormat +from cryptography.x509 import AuthorityInformationAccess as _AuthorityInformationAccess +from cryptography.x509 import ExtendedKeyUsage as _ExtendedKeyUsage +from cryptography.x509 import ExtensionNotFound as _ExtensionNotFound +from cryptography.x509 import TLSFeature as _TLSFeature +from cryptography.x509 import TLSFeatureType as _TLSFeatureType +from cryptography.x509 import load_pem_x509_certificate as _load_pem_x509_certificate +from cryptography.x509.ocsp import OCSPCertStatus as _OCSPCertStatus +from cryptography.x509.ocsp import OCSPRequestBuilder as _OCSPRequestBuilder +from cryptography.x509.ocsp import OCSPResponseStatus as _OCSPResponseStatus +from cryptography.x509.ocsp import load_der_ocsp_response as _load_der_ocsp_response +from cryptography.x509.oid import ( + AuthorityInformationAccessOID as _AuthorityInformationAccessOID, +) +from cryptography.x509.oid import ExtendedKeyUsageOID as _ExtendedKeyUsageOID +from requests import post as _post +from requests.exceptions import RequestException as _RequestException + +from pymongo import _csot + +if TYPE_CHECKING: + from cryptography.hazmat.primitives.asymmetric import ( + dsa, + ec, + ed448, + ed25519, + rsa, + x448, + x25519, + ) + from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + from cryptography.hazmat.primitives.hashes import HashAlgorithm + from cryptography.x509 import Certificate, Name + from cryptography.x509.extensions import Extension, ExtensionTypeVar + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + from OpenSSL.SSL import Connection + + from pymongo.ocsp_cache import _OCSPCache + from pymongo.pyopenssl_context import _CallbackData + + CertificateIssuerPublicKeyTypes = Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + x25519.X25519PublicKey, + x448.X448PublicKey, + ] + +# Note: the functions in this module generally return 1 or 0. The reason +# is simple. 
The entry point, ocsp_callback, is registered as a callback +# with OpenSSL through PyOpenSSL. The callback must return 1 (success) or +# 0 (failure). + +_LOGGER = _logging.getLogger(__name__) + +_CERT_REGEX = _re.compile( + b"-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+", _re.DOTALL +) + + +def _load_trusted_ca_certs(cafile: str) -> list[Certificate]: + """Parse the tlsCAFile into a list of certificates.""" + with open(cafile, "rb") as f: + data = f.read() + + # Load all the certs in the file. + trusted_ca_certs = [] + backend = _default_backend() + for cert_data in _re.findall(_CERT_REGEX, data): + trusted_ca_certs.append(_load_pem_x509_certificate(cert_data, backend)) + return trusted_ca_certs + + +def _get_issuer_cert( + cert: Certificate, chain: Iterable[Certificate], trusted_ca_certs: Optional[list[Certificate]] +) -> Optional[Certificate]: + issuer_name = cert.issuer + for candidate in chain: + if candidate.subject == issuer_name: + return candidate + + # Depending on the server's TLS library, the peer's cert chain may not + # include the self signed root CA. In this case we check the user + # provided tlsCAFile for the issuer. + # Remove once we use the verified peer cert chain in PYTHON-2147. + if trusted_ca_certs: + for candidate in trusted_ca_certs: + if candidate.subject == issuer_name: + return candidate + return None + + +def _verify_signature( + key: CertificateIssuerPublicKeyTypes, + signature: bytes, + algorithm: Union[Prehashed, HashAlgorithm, None], + data: bytes, +) -> int: + # See cryptography.x509.Certificate.public_key + # for the public key types. + try: + if isinstance(key, _RSAPublicKey): + key.verify(signature, data, _PKCS1v15(), algorithm) # type: ignore[arg-type] + elif isinstance(key, _DSAPublicKey): + key.verify(signature, data, algorithm) # type: ignore[arg-type] + elif isinstance(key, _EllipticCurvePublicKey): + key.verify(signature, data, _ECDSA(algorithm)) # type: ignore[arg-type] + elif isinstance( + key, (_X25519PublicKey, _X448PublicKey) + ): # Curve25519 and Curve448 keys do not require verification + return 1 + else: + key.verify(signature, data) + except _InvalidSignature: + return 0 + return 1 + + +def _get_extension( + cert: Certificate, klass: Type[ExtensionTypeVar] +) -> Optional[Extension[ExtensionTypeVar]]: + try: + return cert.extensions.get_extension_for_class(klass) + except _ExtensionNotFound: + return None + + +def _public_key_hash(cert: Certificate) -> bytes: + public_key = cert.public_key() + # https://tools.ietf.org/html/rfc2560#section-4.2.1 + # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key + # (excluding the tag and length fields)" + # https://stackoverflow.com/a/46309453/600498 + if isinstance(public_key, _RSAPublicKey): + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.PKCS1) + elif isinstance(public_key, _EllipticCurvePublicKey): + pbytes = public_key.public_bytes(_Encoding.X962, _PublicFormat.UncompressedPoint) + else: + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) + digest = _Hash(_SHA1(), backend=_default_backend()) # noqa: S303 + digest.update(pbytes) + return digest.finalize() + + +def _get_certs_by_key_hash( + certificates: Iterable[Certificate], issuer: Certificate, responder_key_hash: Optional[bytes] +) -> list[Certificate]: + return [ + cert + for cert in certificates + if _public_key_hash(cert) == responder_key_hash and cert.issuer == issuer.subject + ] + + +def _get_certs_by_name( + certificates: Iterable[Certificate], issuer: 
Certificate, responder_name: Optional[Name] +) -> list[Certificate]: + return [ + cert + for cert in certificates + if cert.subject == responder_name and cert.issuer == issuer.subject + ] + + +def _verify_response_signature(issuer: Certificate, response: OCSPResponse) -> int: + # Response object will have a responder_name or responder_key_hash + # not both. + name = response.responder_name + rkey_hash = response.responder_key_hash + ikey_hash = response.issuer_key_hash + if name is not None and name == issuer.subject or rkey_hash == ikey_hash: + _LOGGER.debug("Responder is issuer") + # Responder is the issuer + responder_cert = issuer + else: + _LOGGER.debug("Responder is a delegate") + # Responder is a delegate + # https://tools.ietf.org/html/rfc6960#section-2.6 + # RFC6960, Section 3.2, Number 3 + certs = response.certificates + if response.responder_name is not None: + responder_certs = _get_certs_by_name(certs, issuer, name) + _LOGGER.debug("Using responder name") + else: + responder_certs = _get_certs_by_key_hash(certs, issuer, rkey_hash) + _LOGGER.debug("Using key hash") + if not responder_certs: + _LOGGER.debug("No matching or valid responder certs.") + return 0 + # XXX: Can there be more than one? If so, should we try each one + # until we find one that passes signature verification? + responder_cert = responder_certs[0] + + # RFC6960, Section 3.2, Number 4 + ext = _get_extension(responder_cert, _ExtendedKeyUsage) + if not ext or _ExtendedKeyUsageOID.OCSP_SIGNING not in ext.value: + _LOGGER.debug("Delegate not authorized for OCSP signing") + return 0 + if not _verify_signature( + issuer.public_key(), + responder_cert.signature, + responder_cert.signature_hash_algorithm, + responder_cert.tbs_certificate_bytes, + ): + _LOGGER.debug("Delegate signature verification failed") + return 0 + # RFC6960, Section 3.2, Number 2 + ret = _verify_signature( + responder_cert.public_key(), + response.signature, + response.signature_hash_algorithm, + response.tbs_response_bytes, + ) + if not ret: + _LOGGER.debug("Response signature verification failed") + return ret + + +def _build_ocsp_request(cert: Certificate, issuer: Certificate) -> OCSPRequest: + # https://cryptography.io/en/latest/x509/ocsp/#creating-requests + builder = _OCSPRequestBuilder() + builder = builder.add_certificate(cert, issuer, _SHA1()) # noqa: S303 + return builder.build() + + +def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: + _LOGGER.debug("Verifying response") + # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. + res = _verify_response_signature(issuer, response) + if not res: + return 0 + + # Note that we are not using a "tolerance period" as discussed in + # https://tools.ietf.org/rfc/rfc5019.txt? + now = _datetime.now(tz=timezone.utc).replace(tzinfo=None) + # RFC6960, Section 3.2, Number 5 + if response.this_update > now: + _LOGGER.debug("thisUpdate is in the future") + return 0 + # RFC6960, Section 3.2, Number 6 + if response.next_update and response.next_update < now: + _LOGGER.debug("nextUpdate is in the past") + return 0 + return 1 + + +def _get_ocsp_response( + cert: Certificate, issuer: Certificate, uri: Union[str, bytes], ocsp_response_cache: _OCSPCache +) -> Optional[OCSPResponse]: + ocsp_request = _build_ocsp_request(cert, issuer) + try: + ocsp_response = ocsp_response_cache[ocsp_request] + _LOGGER.debug("Using cached OCSP response.") + except KeyError: + # CSOT: use the configured timeout or 5 seconds, whichever is smaller. 
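+        # clamp_remaining(5) returns the smaller of five seconds and the remaining CSOT budget; the 0.001 floor below avoids passing requests a zero timeout.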
+ # Note that request's timeout works differently and does not imply an absolute + # deadline: https://requests.readthedocs.io/en/stable/user/quickstart/#timeouts + timeout = max(_csot.clamp_remaining(5), 0.001) + try: + response = _post( + uri, + data=ocsp_request.public_bytes(_Encoding.DER), + headers={"Content-Type": "application/ocsp-request"}, + timeout=timeout, + ) + except _RequestException as exc: + _LOGGER.debug("HTTP request failed: %s", exc) + return None + if response.status_code != 200: + _LOGGER.debug("HTTP request returned %d", response.status_code) + return None + ocsp_response = _load_der_ocsp_response(response.content) + _LOGGER.debug("OCSP response status: %r", ocsp_response.response_status) + if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return None + # RFC6960, Section 3.2, Number 1. Only relevant if we need to + # talk to the responder directly. + # Accessing response.serial_number raises if response status is not + # SUCCESSFUL. + if ocsp_response.serial_number != ocsp_request.serial_number: + _LOGGER.debug("Response serial number does not match request") + return None + if not _verify_response(issuer, ocsp_response): + # The response failed verification. + return None + _LOGGER.debug("Caching OCSP response.") + ocsp_response_cache[ocsp_request] = ocsp_response + + return ocsp_response + + +def _ocsp_callback(conn: Connection, ocsp_bytes: bytes, user_data: Optional[_CallbackData]) -> bool: + """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" + # always pass in user_data but OpenSSL requires it be optional + assert user_data + pycert = conn.get_peer_certificate() + if pycert is None: + _LOGGER.debug("No peer cert?") + return False + cert = pycert.to_cryptography() + # Use the verified chain when available (pyopenssl>=20.0). + if hasattr(conn, "get_verified_chain"): + pychain = conn.get_verified_chain() + trusted_ca_certs = None + else: + pychain = conn.get_peer_cert_chain() + trusted_ca_certs = user_data.trusted_ca_certs + if not pychain: + _LOGGER.debug("No peer cert chain?") + return False + chain = [cer.to_cryptography() for cer in pychain] + issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) + must_staple = False + # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 + ext_tls = _get_extension(cert, _TLSFeature) + if ext_tls is not None: + for feature in ext_tls.value: + if feature == _TLSFeatureType.status_request: + _LOGGER.debug("Peer presented a must-staple cert") + must_staple = True + break + ocsp_response_cache = user_data.ocsp_response_cache + + # No stapled OCSP response + if ocsp_bytes == b"": + _LOGGER.debug("Peer did not staple an OCSP response") + if must_staple: + _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") + return False + if not user_data.check_ocsp_endpoint: + _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") + # No stapled OCSP response, checking responder URI disabled, soft fail. + return True + # https://tools.ietf.org/html/rfc6960#section-3.1 + ext_aia = _get_extension(cert, _AuthorityInformationAccess) + if ext_aia is None: + _LOGGER.debug("No authority access information, soft fail") + # No stapled OCSP response, no responder URI, soft fail. + return True + uris = [ + desc.access_location.value + for desc in ext_aia.value + if desc.access_method == _AuthorityInformationAccessOID.OCSP + ] + if not uris: + _LOGGER.debug("No OCSP URI, soft fail") + # No responder URI, soft fail. 
+ return True + if issuer is None: + _LOGGER.debug("No issuer cert?") + return False + _LOGGER.debug("Requesting OCSP data") + # When requesting data from an OCSP endpoint we only fail on + # successful, valid responses with a certificate status of REVOKED. + for uri in uris: + _LOGGER.debug("Trying %s", uri) + response = _get_ocsp_response(cert, issuer, uri, ocsp_response_cache) + if response is None: + # The endpoint didn't respond in time, or the response was + # unsuccessful or didn't match the request, or the response + # failed verification. + continue + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.GOOD: + return True + if response.certificate_status == _OCSPCertStatus.REVOKED: + return False + # Soft fail if we couldn't get a definitive status. + _LOGGER.debug("No definitive OCSP cert status, soft fail") + return True + + _LOGGER.debug("Peer stapled an OCSP response") + if issuer is None: + _LOGGER.debug("No issuer cert?") + return False + response = _load_der_ocsp_response(ocsp_bytes) + _LOGGER.debug("OCSP response status: %r", response.response_status) + # This happens in _request_ocsp when there is no stapled response so + # we know if we can compare serial numbers for the request and response. + if response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return False + if not _verify_response(issuer, response): + return False + # Cache the verified, stapled response. + ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.REVOKED: + return False + return True diff --git a/pymongo/operations.py b/pymongo/operations.py index 76974e75a0..2c48a2994e 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -13,18 +13,43 @@ # limitations under the License. """Operation class definitions.""" - -from pymongo.common import validate_boolean, validate_is_mapping, validate_list +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +from bson.raw_bson import RawBSONDocument +from pymongo import helpers from pymongo.collation import validate_collation_or_none +from pymongo.common import validate_boolean, validate_is_mapping, validate_list from pymongo.helpers import _gen_index_name, _index_document, _index_list +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline + +if TYPE_CHECKING: + from bson.son import SON + from pymongo.bulk import _Bulk + +# Hint supports an index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z'], or a dictionary +_IndexList = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_IndexKeyHint = Union[str, _IndexList] -class InsertOne(object): +class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" __slots__ = ("_doc",) - def __init__(self, document): + def __init__(self, document: _DocumentType) -> None: """Create an InsertOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`.
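A minimal usage sketch (not part of this diff) of the typed InsertOne class above; the local connection string and collection name are invented for illustration:

from pymongo import InsertOne, MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed local server
coll = client.test.items  # illustrative collection
# Each InsertOne wraps one document; bulk_write sends them in a single batch.
result = coll.bulk_write([InsertOne({"_id": i, "n": i * i}) for i in range(3)])
print(result.inserted_count)  # 3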
@@ -35,28 +60,33 @@ def __init__(self, document): """ self._doc = document - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_insert(self._doc) + bulkobj.add_insert(self._doc) # type: ignore[arg-type] - def __repr__(self): - return "InsertOne(%r)" % (self._doc,) + def __repr__(self) -> str: + return f"InsertOne({self._doc!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return other._doc == self._doc return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -class DeleteOne(object): +class DeleteOne: """Represents a delete_one operation.""" - __slots__ = ("_filter", "_collation") + __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None): + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -64,40 +94,64 @@ def __init__(self, filter, collation=None): :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. - + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ if filter is not None: validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint self._filter = filter self._collation = collation - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation) + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) - def __repr__(self): - return "DeleteOne(%r, %r)" % (self._filter, self._collation) + def __repr__(self) -> str: + return f"DeleteOne({self._filter!r}, {self._collation!r}, {self._hint!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -class DeleteMany(object): +class DeleteMany: """Represents a delete_many operation.""" - __slots__ = ("_filter", "_collation") + __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None): + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. 
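A hedged sketch of the new hint option on the delete classes above; per the docstrings, delete hints need MongoDB 4.4+, and the collection and index names here are invented:

from pymongo import ASCENDING, DeleteMany, MongoClient

client = MongoClient()  # assumed default localhost connection
coll = client.test.events  # illustrative collection
coll.create_index([("expires", ASCENDING)], name="expires_1")
# hint accepts the index name or the same pair form passed to create_index.
coll.bulk_write([DeleteMany({"expires": {"$lt": 0}}, hint="expires_1")])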
@@ -105,40 +159,66 @@ def __init__(self, filter, collation=None): :Parameters: - `filter`: A query that matches the documents to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. - + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ if filter is not None: validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint self._filter = filter self._collation = collation - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation) + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) - def __repr__(self): - return "DeleteMany(%r, %r)" % (self._filter, self._collation) + def __repr__(self) -> str: + return f"DeleteMany({self._filter!r}, {self._collation!r}, {self._hint!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -class ReplaceOne(object): +class ReplaceOne(Generic[_DocumentType]): """Represents a replace_one operation.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation") + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter, replacement, upsert=False, collation=None): + def __init__( + self, + filter: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -149,76 +229,127 @@ def __init__(self, filter, replacement, upsert=False, collation=None): - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. - + :class:`~pymongo.collation.Collation`. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 - Added the `collation` option. + Added the ``collation`` option. 
""" if filter is not None: validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint self._filter = filter self._doc = replacement self._upsert = upsert self._collation = collation - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_replace(self._filter, self._doc, self._upsert, - collation=self._collation) - - def __eq__(self, other): + bulkobj.add_replace( + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ( - (other._filter, other._doc, other._upsert, other._collation) == - (self._filter, self._doc, self._upsert, self._collation)) + return (other._filter, other._doc, other._upsert, other._collation, other._hint,) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + other._hint, + ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "%s(%r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation) + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) -class _UpdateOp(object): +class _UpdateOp: """Private base class for update operations.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters") - - def __init__(self, filter, doc, upsert, collation, array_filters): + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + doc: Union[Mapping[str, Any], _Pipeline], + upsert: bool, + collation: Optional[_CollationIn], + array_filters: Optional[list[Mapping[str, Any]]], + hint: Optional[_IndexKeyHint], + ): if filter is not None: validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) if array_filters is not None: validate_list("array_filters", array_filters) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, SON[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter self._doc = doc self._upsert = upsert self._collation = collation self._array_filters = array_filters - def __eq__(self, other): - if type(other) == type(self): + def __eq__(self, other: object) -> bool: + if isinstance(other, type(self)): return ( - (other._filter, other._doc, other._upsert, other._collation, - other._array_filters) == - (self._filter, self._doc, self._upsert, self._collation, - self._array_filters)) + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) return NotImplemented - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "%s(%r, %r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._array_filters) + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + 
self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) class UpdateOne(_UpdateOp): @@ -226,8 +357,15 @@ class UpdateOne(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None): + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -238,11 +376,18 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. - + array elements an update should apply. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. .. versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. .. versionchanged:: 3.6 @@ -250,14 +395,19 @@ def __init__(self, filter, update, upsert=False, collation=None, .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateOne, self).__init__(filter, update, upsert, collation, - array_filters) + super().__init__(filter, update, upsert, collation, array_filters, hint) - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, False, self._upsert, - collation=self._collation, - array_filters=self._array_filters) + bulkobj.add_update( + self._filter, + self._doc, + False, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) class UpdateMany(_UpdateOp): @@ -265,8 +415,15 @@ class UpdateMany(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None): + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -277,11 +434,18 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. - + array elements an update should apply. 
+ - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. .. versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. .. versionchanged:: 3.6 @@ -289,40 +453,45 @@ def __init__(self, filter, update, upsert=False, collation=None, .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateMany, self).__init__(filter, update, upsert, collation, - array_filters) + super().__init__(filter, update, upsert, collation, array_filters, hint) - def _add_to_bulk(self, bulkobj): + def _add_to_bulk(self, bulkobj: _Bulk) -> None: """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, True, self._upsert, - collation=self._collation, - array_filters=self._array_filters) - - -class IndexModel(object): + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class IndexModel: """Represents an index to create.""" __slots__ = ("__document",) - def __init__(self, keys, **kwargs): + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: """Create an Index instance. For use with :meth:`~pymongo.collection.Collection.create_indexes`. - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, - :data:`~pymongo.TEXT`). + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str`, and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). Valid options include, but are not limited to: - `name`: custom name to use for this index - if none is given, a name will be generated. - - `unique`: if ``True`` creates a uniqueness constraint on the index. - - `background`: if ``True`` this index should be created in the + - `unique`: if ``True``, creates a uniqueness constraint on the index. + - `background`: if ``True``, this index should be created in the background. - `sparse`: if ``True``, omit from the index any documents that lack the indexed field. @@ -338,40 +507,75 @@ def __init__(self, keys, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires server version >= 3.2. + a partial index. - `collation`: An instance of :class:`~pymongo.collation.Collation` - that specifies the collation to use in MongoDB >= 3.4. + that specifies the collation to use. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the { "$**" : 1} key - pattern. Requires server version >= 4.2. + pattern. Requires MongoDB >= 4.2. 
+ - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. See the MongoDB documentation for a full list of supported options by server version. :Parameters: - - `keys`: a single key or a list of (key, direction) - pairs specifying the index to create + - `keys`: a single key or a list containing (key, direction) pairs + or keys specifying the index to create. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments + arguments. + .. versionchanged:: 3.11 + Added the ``hidden`` option. .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. + Added the ``partialFilterExpression`` option to support partial + indexes. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ """ keys = _index_list(keys) - if "name" not in kwargs: + if kwargs.get("name") is None: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__document = kwargs if collation is not None: - self.__document['collation'] = collation + self.__document["collation"] = collation @property - def document(self): + def document(self) -> dict[str, Any]: """An index document suitable for passing to the createIndexes command. """ return self.__document + + +class SearchIndexModel: + """Represents a search index to create.""" + + __slots__ = ("__document",) + + def __init__(self, definition: Mapping[str, Any], name: Optional[str] = None) -> None: + """Create a Search Index instance. + + For use with :meth:`~pymongo.collection.Collection.create_search_index` and :meth:`~pymongo.collection.Collection.create_search_indexes`. + + :Parameters: + - `definition` - The definition for this index. + - `name` (optional) - The name for this index, if present. + + .. versionadded:: 4.5 + + .. note:: Search indexes require a MongoDB server version 7.0+ Atlas cluster. + """ + if name is not None: + self.__document = dict(name=name, definition=definition) + else: + self.__document = dict(definition=definition) + + @property + def document(self) -> Mapping[str, Any]: + """The document for this index.""" + return self.__document diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index ba9664fa78..30fd33ddf4 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -14,17 +14,26 @@ """Run a target function on a background thread.""" -import atexit +from __future__ import annotations + +import sys import threading import time import weakref +from typing import Any, Callable, Optional -from pymongo.monotonic import time as _time +from pymongo.lock import _create_lock -class PeriodicExecutor(object): - def __init__(self, interval, min_interval, target, name=None): - """"Run a target function periodically on a background thread. +class PeriodicExecutor: + def __init__( + self, + interval: float, + min_interval: float, + target: Callable[[], bool], + name: Optional[str] = None, + ): + """Run a target function periodically on a background thread. If the target's return value is false, the executor stops.
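An illustrative sketch of the executor contract described in the docstring above; PeriodicExecutor is a private PyMongo class, so this is for understanding only, and the interval values are arbitrary:

import time

from pymongo.periodic_executor import PeriodicExecutor  # private API

runs: list[float] = []

def _target() -> bool:
    runs.append(time.monotonic())
    return len(runs) < 3  # a false return value stops the executor

executor = PeriodicExecutor(interval=0.05, min_interval=0.01, target=_target, name="demo")
executor.open()
time.sleep(0.5)
executor.close()
executor.join(timeout=1)
print(len(runs))  # 3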
@@ -44,13 +53,16 @@ def __init__(self, interval, min_interval, target, name=None): self._min_interval = min_interval self._target = target self._stopped = False - self._thread = None + self._thread: Optional[threading.Thread] = None self._name = name - + self._skip_sleep = False self._thread_will_exit = False - self._lock = threading.Lock() + self._lock = _create_lock() - def open(self): + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + + def open(self) -> None: """Start. Multiple calls have no effect. Not safe to call from multiple threads at once. @@ -62,13 +74,14 @@ def open(self): # join should not block indefinitely because there is no # other work done outside the while loop in self._run. try: + assert self._thread is not None self._thread.join() except ReferenceError: # Thread terminated. pass self._thread_will_exit = False self._stopped = False - started = False + started: Any = False try: started = self._thread and self._thread.is_alive() except ReferenceError: @@ -80,9 +93,17 @@ def open(self): thread.daemon = True self._thread = weakref.proxy(thread) _register_executor(self) - thread.start() + # Mitigation to RuntimeError firing when thread starts on shutdown + # https://github.com/python/cpython/issues/114570 + try: + thread.start() + except RuntimeError as e: + if "interpreter shutdown" in str(e) or sys.is_finalizing(): + self._thread = None + return + raise - def close(self, dummy=None): + def close(self, dummy: Any = None) -> None: """Stop. To restart, call open(). The dummy parameter allows an executor's close method to be a weakref @@ -90,7 +111,7 @@ def close(self, dummy=None): """ self._stopped = True - def join(self, timeout=None): + def join(self, timeout: Optional[int] = None) -> None: if self._thread is not None: try: self._thread.join(timeout) @@ -98,39 +119,44 @@ def join(self, timeout=None): # Thread already terminated, or not yet started. pass - def wake(self): + def wake(self) -> None: """Execute the target function soon.""" self._event = True - def update_interval(self, new_interval): + def update_interval(self, new_interval: int) -> None: self._interval = new_interval - def __should_stop(self): + def skip_sleep(self) -> None: + self._skip_sleep = True + + def __should_stop(self) -> bool: with self._lock: if self._stopped: self._thread_will_exit = True return True return False - def _run(self): + def _run(self) -> None: while not self.__should_stop(): try: if not self._target(): self._stopped = True break - except: + except BaseException: with self._lock: self._stopped = True self._thread_will_exit = True raise - deadline = _time() + self._interval - - while not self._stopped and _time() < deadline: - time.sleep(self._min_interval) - if self._event: - break # Early wake. + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: + time.sleep(self._min_interval) + if self._event: + break # Early wake. 
self._event = False @@ -144,16 +170,16 @@ def _run(self): _EXECUTORS = set() -def _register_executor(executor): +def _register_executor(executor: PeriodicExecutor) -> None: ref = weakref.ref(executor, _on_executor_deleted) _EXECUTORS.add(ref) -def _on_executor_deleted(ref): +def _on_executor_deleted(ref: weakref.ReferenceType[PeriodicExecutor]) -> None: _EXECUTORS.remove(ref) -def _shutdown_executors(): +def _shutdown_executors() -> None: if _EXECUTORS is None: return @@ -173,5 +199,3 @@ def _shutdown_executors(): executor.join(1) executor = None - -atexit.register(_shutdown_executors) diff --git a/pymongo/pool.py b/pymongo/pool.py index 6407d53ab9..cdafb2cc2c 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -12,162 +12,155 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. +from __future__ import annotations + +import collections import contextlib import copy import os import platform import socket +import ssl import sys import threading -import collections - -try: - import ssl - from ssl import SSLError - _HAVE_SNI = getattr(ssl, 'HAS_SNI', False) -except ImportError: - _HAVE_SNI = False - class SSLError(socket.error): - pass - -try: - from ssl import CertificateError as _SSLCertificateError -except ImportError: - class _SSLCertificateError(ValueError): - pass - - +import time +import weakref +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +import bson from bson import DEFAULT_CODEC_OPTIONS -from bson.py3compat import imap, itervalues, _unicode, integer_types from bson.son import SON -from pymongo import auth, helpers, thread_util, __version__ +from pymongo import __version__, _csot, auth, helpers from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (MAX_BSON_SIZE, - MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, - MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, - ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT) -from pymongo.errors import (AutoReconnect, - ConnectionFailure, - ConfigurationError, - InvalidOperation, - DocumentTooLarge, - NetworkTimeout, - NotMasterError, - OperationFailure, - PyMongoError) -from pymongo.ismaster import IsMaster -from pymongo.monotonic import time as _time -from pymongo.monitoring import (ConnectionCheckOutFailedReason, - ConnectionClosedReason) -from pymongo.network import (command, - receive_message, - SocketChecker) +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, + MAX_POOL_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + MIN_POOL_SIZE, + ORDERED_TYPES, + WAIT_QUEUE_TIMEOUT, +) +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, + _CertificateError, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers import _handle_reauth +from pymongo.lock import _create_lock +from pymongo.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, + _EventListeners, +) +from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE -# Always use our backport so we always have support for IP address matching -from pymongo.ssl_match_hostname 
import match_hostname, CertificateError +from pymongo.socket_checker import SocketChecker +from pymongo.ssl_support import HAS_SNI, SSLError + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.auth import MongoCredential, _AuthContext + from pymongo.client_session import ClientSession + from pymongo.compression_support import ( + CompressionSettings, + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.driver_info import DriverInfo + from pymongo.message import _OpMsg, _OpReply + from pymongo.mongo_client import MongoClient, _MongoClientErrorHandler + from pymongo.pyopenssl_context import SSLContext, _sslConn + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.server_api import ServerApi + from pymongo.typings import ClusterTime, _Address, _CollationIn + from pymongo.write_concern import WriteConcern -# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are -# not permitted for SNI hostname. try: - from ipaddress import ip_address - def is_ip_address(address): - try: - ip_address(_unicode(address)) - return True - except (ValueError, UnicodeError): - return False -except ImportError: - if hasattr(socket, 'inet_pton') and socket.has_ipv6: - # Most *nix, recent Windows - def is_ip_address(address): - try: - # inet_pton rejects IPv4 literals with leading zeros - # (e.g. 192.168.0.01), inet_aton does not, and we - # can connect to them without issue. Use inet_aton. - socket.inet_aton(address) - return True - except socket.error: - try: - socket.inet_pton(socket.AF_INET6, address) - return True - except socket.error: - return False - else: - # No inet_pton - def is_ip_address(address): - try: - socket.inet_aton(address) - return True - except socket.error: - if ':' in address: - # ':' is not a valid character for a hostname. If we get - # here a few things have to be true: - # - We're on a recent version of python 2.7 (2.7.9+). - # Older 2.7 versions don't support SNI. - # - We're on Windows XP or some unusual Unix that doesn't - # have inet_pton. - # - The application is using IPv6 literals with TLS, which - # is pretty unusual. - return True - return False + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl -try: - from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC - def _set_non_inheritable_non_atomic(fd): + def _set_non_inheritable_non_atomic(fd: int) -> None: """Set the close-on-exec flag on the given file descriptor.""" flags = fcntl(fd, F_GETFD) fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + except ImportError: # Windows, various platforms we don't claim to support # (Jython, IronPython, ...), systems that don't provide # everything we need from fcntl, etc. - def _set_non_inheritable_non_atomic(dummy): + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 """Dummy function for platforms that don't provide fcntl.""" - pass -_MAX_TCP_KEEPIDLE = 300 + +_MAX_TCP_KEEPIDLE = 120 _MAX_TCP_KEEPINTVL = 10 _MAX_TCP_KEEPCNT = 9 -if sys.platform == 'win32': +if sys.platform == "win32": try: import _winreg as winreg except ImportError: import winreg + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). 
+ return default + try: with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters") as key: - _DEFAULT_TCP_IDLE_MS, _ = winreg.QueryValueEx(key, "KeepAliveTime") - _DEFAULT_TCP_INTERVAL_MS, _ = winreg.QueryValueEx( - key, "KeepAliveInterval") - # Make sure these are integers. - if not isinstance(_DEFAULT_TCP_IDLE_MS, integer_types): - raise ValueError - if not isinstance(_DEFAULT_TCP_INTERVAL_MS, integer_types): - raise ValueError - except (OSError, ValueError): - # We could not check the default values so do not attempt to override. - def _set_keepalive_times(dummy): - pass - else: - def _set_keepalive_times(sock): - idle_ms = min(_DEFAULT_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_DEFAULT_TCP_INTERVAL_MS, - _MAX_TCP_KEEPINTVL * 1000) - if (idle_ms < _DEFAULT_TCP_IDLE_MS or - interval_ms < _DEFAULT_TCP_INTERVAL_MS): - sock.ioctl(socket.SIO_KEEPALIVE_VALS, - (1, idle_ms, interval_ms)) + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + else: - def _set_tcp_option(sock, tcp_option, max_value): + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: if hasattr(socket, tcp_option): sockopt = getattr(socket, tcp_option) try: @@ -177,194 +170,409 @@ def _set_tcp_option(sock, tcp_option, max_value): default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) if default > max_value: sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except socket.error: + except OSError: pass - def _set_keepalive_times(sock): - _set_tcp_option(sock, 'TCP_KEEPIDLE', _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, 'TCP_KEEPINTVL', _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, 'TCP_KEEPCNT', _MAX_TCP_KEEPCNT) - -_METADATA = SON([ - ('driver', SON([('name', 'PyMongo'), ('version', __version__)])), -]) - -if sys.platform.startswith('linux'): - # platform.linux_distribution was deprecated in Python 3.5. - if sys.version_info[:2] < (3, 5): - # Distro name and version (e.g. Ubuntu 16.04 xenial) - _name = ' '.join([part for part in - platform.linux_distribution() if part]) - else: - _name = platform.system() - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', _name), - ('architecture', platform.machine()), - # Kernel version (e.g. 4.4.0-17-generic). - ('version', platform.release()) - ]) -elif sys.platform == 'darwin': - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', platform.system()), - ('architecture', platform.machine()), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - ('version', platform.mac_ver()[0]) - ]) -elif sys.platform == 'win32': - _METADATA['os'] = SON([ - ('type', platform.system()), - # "Windows XP", "Windows 7", "Windows 10", etc. 
- ('name', ' '.join((platform.system(), platform.release()))), - ('architecture', platform.machine()), - # Windows patch level (e.g. 5.1.2600-SP3) - ('version', '-'.join(platform.win32_ver()[1:3])) - ]) -elif sys.platform.startswith('java'): + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +_METADATA: SON[str, Any] = SON( + [ + ("driver", SON([("name", "PyMongo"), ("version", __version__)])), + ] +) + +if sys.platform.startswith("linux"): + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() + _METADATA["os"] = SON( + [ + ("type", _name), + ("name", _name), + ("architecture", platform.machine()), + # Kernel version (e.g. 4.4.0-17-generic). + ("version", platform.release()), + ] + ) +elif sys.platform == "darwin": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", platform.system()), + ("architecture", platform.machine()), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + ("version", platform.mac_ver()[0]), + ] + ) +elif sys.platform == "win32": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + # "Windows XP", "Windows 7", "Windows 10", etc. + ("name", " ".join((platform.system(), platform.release()))), + ("architecture", platform.machine()), + # Windows patch level (e.g. 5.1.2600-SP3) + ("version", "-".join(platform.win32_ver()[1:3])), + ] + ) +elif sys.platform.startswith("java"): _name, _ver, _arch = platform.java_ver()[-1] - _METADATA['os'] = SON([ - # Linux, Windows 7, Mac OS X, etc. - ('type', _name), - ('name', _name), - # x86, x86_64, AMD64, etc. - ('architecture', _arch), - # Linux kernel version, OSX version, etc. - ('version', _ver) - ]) + _METADATA["os"] = SON( + [ + # Linux, Windows 7, Mac OS X, etc. + ("type", _name), + ("name", _name), + # x86, x86_64, AMD64, etc. + ("architecture", _arch), + # Linux kernel version, OSX version, etc. + ("version", _ver), + ] + ) else: # Get potential alias (e.g. 
SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias( - platform.system(), platform.release(), platform.version()) - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', ' '.join([part for part in _aliased[:2] if part])), - ('architecture', platform.machine()), - ('version', _aliased[2]) - ]) - -if platform.python_implementation().startswith('PyPy'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(imap(str, sys.pypy_version_info)), - '(Python %s)' % '.'.join(imap(str, sys.version_info)))) -elif sys.platform.startswith('java'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(imap(str, sys.version_info)), - '(%s)' % ' '.join((platform.system(), platform.release())))) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", " ".join([part for part in _aliased[:2] if part])), + ("architecture", platform.machine()), + ("version", _aliased[2]), + ] + ) + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) else: - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(imap(str, sys.version_info)))) + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) + + +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _is_faas() -> bool: + return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> dict[str, Any]: + env: dict[str, Any] = {} + # Skip if multiple (or no) envs are matched. 
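# The check just below uses the tuple-of-bools counting idiom to demand that
# exactly one FaaS detector matched; an ambiguous or empty match leaves the
# env document empty. A runnable illustration (values are hypothetical):
#
#     >>> (True, False, False, False).count(True) != 1   # exactly one: keep env
#     False
#     >>> (True, False, True, False).count(True) != 1    # ambiguous: skip
#     True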
+ if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) # If the first getaddrinfo call of this interpreter's life is on a thread, # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the # main thread, to avoid the deadlock. See PYTHON-607. -u'foo'.encode('idna') +"foo".encode("idna") -def _raise_connection_failure(address, error, msg_prefix=None): +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: """Convert a socket.error to ConnectionFailure and raise it.""" host, port = address # If connecting to a Unix socket, port will be None. if port is not None: - msg = '%s:%d: %s' % (host, port, error) + msg = "%s:%d: %s" % (host, port, error) else: - msg = '%s: %s' % (host, error) + msg = f"{host}: {error}" if msg_prefix: msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) - elif isinstance(error, SSLError) and 'timed out' in str(error): - # CPython 2.7 and PyPy 2.x do not distinguish network - # timeouts from other SSLErrors (https://bugs.python.org/issue10272). + raise NetworkTimeout(msg) from error + elif isinstance(error, SSLError) and "timed out" in str(error): + # Eventlet does not distinguish TLS network timeouts from other + # SSLErrors (https://github.com/eventlet/eventlet/issues/692). 
# Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised - # on the above platforms. - raise NetworkTimeout(msg) + # 'timed out' appears in all the timeout related SSLErrors raised. + raise NetworkTimeout(msg) from error else: - raise AutoReconnect(msg) + raise AutoReconnect(msg) from error + + +def _cond_wait(condition: threading.Condition, deadline: Optional[float]) -> bool: + timeout = deadline - time.monotonic() if deadline else None + return condition.wait(timeout) + + +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details -class PoolOptions(object): +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result - __slots__ = ('__max_pool_size', '__min_pool_size', - '__max_idle_time_seconds', - '__connect_timeout', '__socket_timeout', - '__wait_queue_timeout', '__wait_queue_multiple', - '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', - '__event_listeners', '__appname', '__driver', '__metadata', - '__compression_settings') - def __init__(self, max_pool_size=MAX_POOL_SIZE, - min_pool_size=MIN_POOL_SIZE, - max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, - socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, - wait_queue_multiple=None, ssl_context=None, - ssl_match_hostname=True, socket_keepalive=True, - event_listeners=None, appname=None, driver=None, - compression_settings=None): +class PoolOptions: + """Read only connection pool options for a MongoClient. + Should not be instantiated directly by application developers. 
Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ + + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size: int = MAX_POOL_SIZE, + min_pool_size: int = MIN_POOL_SIZE, + max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, + connect_timeout: Optional[float] = None, + socket_timeout: Optional[float] = None, + wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, + ssl_context: Optional[SSLContext] = None, + tls_allow_invalid_hostnames: bool = False, + event_listeners: Optional[_EventListeners] = None, + appname: Optional[str] = None, + driver: Optional[DriverInfo] = None, + compression_settings: Optional[CompressionSettings] = None, + max_connecting: int = MAX_CONNECTING, + pause_enabled: bool = True, + server_api: Optional[ServerApi] = None, + load_balanced: Optional[bool] = None, + credentials: Optional[MongoCredential] = None, + ): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds self.__connect_timeout = connect_timeout self.__socket_timeout = socket_timeout self.__wait_queue_timeout = wait_queue_timeout - self.__wait_queue_multiple = wait_queue_multiple self.__ssl_context = ssl_context - self.__ssl_match_hostname = ssl_match_hostname - self.__socket_keepalive = socket_keepalive + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames self.__event_listeners = event_listeners self.__appname = appname self.__driver = driver self.__compression_settings = compression_settings + self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled + self.__server_api = server_api + self.__load_balanced = load_balanced + self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) if appname: - self.__metadata['application'] = {'name': appname} + self.__metadata["application"] = {"name": appname} # Combine the "driver" MongoClient option with PyMongo's info, like: # { # 'driver': { # 'name': 'PyMongo|MyDriver', - # 'version': '3.7.0|1.2.3', + # 'version': '4.2.0|1.2.3', # }, - # 'platform': 'CPython 3.6.0|MyPlatform' + # 'platform': 'CPython 3.7.0|MyPlatform' # } if driver: if driver.name: - self.__metadata['driver']['name'] = "%s|%s" % ( - _METADATA['driver']['name'], driver.name) + self.__metadata["driver"]["name"] = "{}|{}".format( + _METADATA["driver"]["name"], + driver.name, + ) if driver.version: - self.__metadata['driver']['version'] = "%s|%s" % ( - _METADATA['driver']['version'], driver.version) + self.__metadata["driver"]["version"] = "{}|{}".format( + _METADATA["driver"]["version"], + driver.version, + ) if driver.platform: - self.__metadata['platform'] = "%s|%s" % ( - _METADATA['platform'], driver.platform) + self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) + + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) @property - def non_default_options(self): + def _credentials(self) -> Optional[MongoCredential]: + """A 
:class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + + @property + def non_default_options(self) -> dict[str, Any]: """The non-default options this pool was created with. Added for CMAP's :class:`PoolCreatedEvent`. """ opts = {} if self.__max_pool_size != MAX_POOL_SIZE: - opts['maxPoolSize'] = self.__max_pool_size + opts["maxPoolSize"] = self.__max_pool_size if self.__min_pool_size != MIN_POOL_SIZE: - opts['minPoolSize'] = self.__min_pool_size + opts["minPoolSize"] = self.__min_pool_size if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - opts['maxIdleTimeMS'] = self.__max_idle_time_seconds * 1000 + assert self.__max_idle_time_seconds is not None + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - opts['waitQueueTimeoutMS'] = self.__wait_queue_timeout * 1000 + assert self.__wait_queue_timeout is not None + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts["maxConnecting"] = self.__max_connecting return opts @property - def max_pool_size(self): + def max_pool_size(self) -> float: """The maximum allowable number of concurrent connections to each connected server. Requests to a server will block if there are `maxPoolSize` outstanding connections to the requested server. @@ -379,14 +587,25 @@ def max_pool_size(self): return self.__max_pool_size @property - def min_pool_size(self): + def min_pool_size(self) -> int: """The minimum required number of concurrent connections that the pool will maintain to each connected server. Default is 0. """ return self.__min_pool_size @property - def max_idle_time_seconds(self): + def max_connecting(self) -> int: + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + + @property + def pause_enabled(self) -> bool: + return self.__pause_enabled + + @property + def max_idle_time_seconds(self) -> Optional[int]: """The maximum number of seconds that a connection can remain idle in the pool before being removed and replaced. Defaults to `None` (no limit). @@ -394,166 +613,326 @@ def max_idle_time_seconds(self): return self.__max_idle_time_seconds @property - def connect_timeout(self): - """How long a connection can take to be opened before timing out. - """ + def connect_timeout(self) -> Optional[float]: + """How long a connection can take to be opened before timing out.""" return self.__connect_timeout @property - def socket_timeout(self): - """How long a send or receive on a socket can take before timing out. - """ + def socket_timeout(self) -> Optional[float]: + """How long a send or receive on a socket can take before timing out.""" return self.__socket_timeout @property - def wait_queue_timeout(self): + def wait_queue_timeout(self) -> Optional[int]: """How long a thread will wait for a socket from the pool if the pool has no free sockets. """ return self.__wait_queue_timeout @property - def wait_queue_multiple(self): - """Multiplied by max_pool_size to give the number of threads allowed - to wait for a socket at one time. - """ - return self.__wait_queue_multiple - - @property - def ssl_context(self): - """An SSLContext instance or None. - """ + def _ssl_context(self) -> Optional[SSLContext]: + """An SSLContext instance or None.""" return self.__ssl_context @property - def ssl_match_hostname(self): - """Call ssl.match_hostname if cert_reqs is not ssl.CERT_NONE. 
- """ - return self.__ssl_match_hostname - - @property - def socket_keepalive(self): - """Whether to send periodic messages to determine if a connection - is closed. - """ - return self.__socket_keepalive + def tls_allow_invalid_hostnames(self) -> bool: + """If True skip ssl.match_hostname.""" + return self.__tls_allow_invalid_hostnames @property - def event_listeners(self): - """An instance of pymongo.monitoring._EventListeners. - """ + def _event_listeners(self) -> Optional[_EventListeners]: + """An instance of pymongo.monitoring._EventListeners.""" return self.__event_listeners @property - def appname(self): - """The application name, for sending with ismaster in server handshake. - """ + def appname(self) -> Optional[str]: + """The application name, for sending with hello in server handshake.""" return self.__appname @property - def driver(self): - """Driver name and version, for sending with ismaster in handshake. - """ + def driver(self) -> Optional[DriverInfo]: + """Driver name and version, for sending with hello in handshake.""" return self.__driver @property - def compression_settings(self): + def _compression_settings(self) -> Optional[CompressionSettings]: return self.__compression_settings @property - def metadata(self): - """A dict of metadata about the application, driver, os, and platform. - """ + def metadata(self) -> SON[str, Any]: + """A dict of metadata about the application, driver, os, and platform.""" return self.__metadata.copy() + @property + def server_api(self) -> Optional[ServerApi]: + """A pymongo.server_api.ServerApi or None.""" + return self.__server_api -class SocketInfo(object): - """Store a socket with some metadata. + @property + def load_balanced(self) -> Optional[bool]: + """True if this Pool is configured in load balanced mode.""" + return self.__load_balanced + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +class Connection: + """Store a connection with some metadata. :Parameters: - - `sock`: a raw socket object + - `conn`: a raw connection object - `pool`: a Pool instance - `address`: the server's (host, port) - `id`: the id of this socket in it's pool """ - def __init__(self, sock, pool, address, id): - self.sock = sock + + def __init__( + self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int + ): + self.pool_ref = weakref.ref(pool) + self.conn = conn self.address = address self.id = id - self.authset = set() self.closed = False - self.last_checkin_time = _time() + self.last_checkin_time = time.monotonic() self.performed_handshake = False - self.is_writable = False + self.is_writable: bool = False self.max_wire_version = MAX_WIRE_VERSION self.max_bson_size = MAX_BSON_SIZE self.max_message_size = MAX_MESSAGE_SIZE self.max_write_batch_size = MAX_WRITE_BATCH_SIZE self.supports_sessions = False + self.hello_ok: bool = False self.is_mongos = False self.op_msg_enabled = False - self.listeners = pool.opts.event_listeners + self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap - self.compression_settings = pool.opts.compression_settings - self.compression_context = None - - # The pool's pool_id changes with each reset() so we can close sockets - # created before the last reset. 
- self.pool_id = pool.pool_id + self.compression_settings = pool.opts._compression_settings + self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() + self.oidc_token_gen_id: Optional[int] = None + # Support for mechanism negotiation on the initial handshake. + self.negotiated_mechs: Optional[list[str]] = None + self.auth_ctx: Optional[_AuthContext] = None + + # The pool's generation changes with each reset() so we can close + # sockets created before the last reset. + self.pool_gen = pool.gen + self.generation = self.pool_gen.get_overall() self.ready = False - - def ismaster(self, metadata, cluster_time): - cmd = SON([('ismaster', 1)]) - if not self.performed_handshake: - cmd['client'] = metadata + self.cancel_context: Optional[_CancellationContext] = None + if not pool.handshake: + # This is a Monitor connection. + self.cancel_context = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False + # For load balancer support. + self.service_id: Optional[ObjectId] = None + # When executing a transaction in load balancing mode, this flag is + # set to true to indicate that the session now owns the connection. + self.pinned_txn = False + self.pinned_cursor = False + self.active = False + self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient, cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + def unpin(self) -> None: + pool = self.pool_ref() + if pool: + pool.checkin(self) + else: + self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> SON[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. 
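# A hedged sketch of the two command shapes hello_cmd() below can return. In
# this patch HelloCompat.CMD spells the modern "hello" command and
# HelloCompat.LEGACY_CMD the legacy "ismaster" spelling; shown as plain dicts
# purely for illustration:
#
#     {"hello": 1}                      # stable API, load balanced, or helloOk
#     {"ismaster": 1, "helloOk": True}  # legacy handshake, advertising hello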
+ if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return SON([(HelloCompat.CMD, 1)]) + else: + return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)]) + + def hello(self) -> Hello[dict[str, Any]]: + return self._hello(None, None, None) + + def _hello( + self, + cluster_time: Optional[ClusterTime], + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata if self.compression_settings: - cmd['compression'] = self.compression_settings.compressors - - if self.max_wire_version >= 6 and cluster_time is not None: - cmd['$clusterTime'] = cluster_time - - ismaster = IsMaster(self.command('admin', cmd, publish_events=False)) - self.is_writable = ismaster.is_writable - self.max_wire_version = ismaster.max_wire_version - self.max_bson_size = ismaster.max_bson_size - self.max_message_size = ismaster.max_message_size - self.max_write_batch_size = ismaster.max_write_batch_size - self.supports_sessions = ( - ismaster.logical_session_timeout_minutes is not None) - self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos - if not self.performed_handshake and self.compression_settings: - ctx = self.compression_settings.get_compression_context( - ismaster.compressors) + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + if not performing_handshake and cluster_time is not None: + cmd["$clusterTime"] = cluster_time + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = hello.logical_session_timeout_minutes is not None + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) self.compression_context = ctx - self.performed_handshake = True - self.op_msg_enabled = ismaster.max_wire_version >= 6 - return ismaster - - def command(self, dbname, spec, slave_ok=False, - read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, check=True, - allowable_errors=None, check_keys=False, - read_concern=None, - write_concern=None, - parse_write_concern_error=False, - collation=None, - session=None, - client=None, - retryable_write=False, - publish_events=True, - user_fields=None): + self.op_msg_enabled = True + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + def _next_reply(self) -> dict[str, Any]: + reply = self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + client: Optional[MongoClient] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: """Execute a command or raise an error. 
:Parameters: - `dbname`: name of the database on which to run the command - `spec`: a command document as a dict, SON, or mapping object - - `slave_ok`: whether to set the SlaveOkay wire protocol bit - `read_preference`: a read preference - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - - `check_keys`: if True, check `spec` for invalid keys - `read_concern`: The read concern for this command. - `write_concern`: The write concern for this command. - `parse_write_concern_error`: Whether to parse the @@ -571,109 +950,100 @@ def command(self, dbname, spec, slave_ok=False, session = _validate_session_write_concern(session, write_concern) # Ensure command name remains in first place. - if not isinstance(spec, ORDERED_TYPES): + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] spec = SON(spec) - if (read_concern and self.max_wire_version < 4 - and not read_concern.ok_for_legacy): - raise ConfigurationError( - 'read concern level of %s is not valid ' - 'with a max wire version of %d.' - % (read_concern.level, self.max_wire_version)) - if not (write_concern is None or write_concern.acknowledged or - collation is None): - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') - if (self.max_wire_version >= 5 and - write_concern and - not write_concern.is_server_default): - spec['writeConcern'] = write_concern.document - elif self.max_wire_version < 5 and collation is not None: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use a collation.') + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + self.add_server_api(spec) if session: - session._apply_to(spec, retryable_write, read_preference) + session._apply_to(spec, retryable_write, read_preference, self) self.send_cluster_time(spec, session, client) listeners = self.listeners if publish_events else None - unacknowledged = write_concern and not write_concern.acknowledged + unacknowledged = bool(write_concern and not write_concern.acknowledged) if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self.sock, dbname, spec, slave_ok, - self.is_mongos, read_preference, codec_options, - session, client, check, allowable_errors, - self.address, check_keys, listeners, - self.max_bson_size, read_concern, - parse_write_concern_error=parse_write_concern_error, - collation=collation, - compression_ctx=self.compression_context, - use_op_msg=self.op_msg_enabled, - unacknowledged=unacknowledged, - user_fields=user_fields) - except OperationFailure: + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. 
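# Error routing in command() is two-tier: server-side failures
# (OperationFailure, NotPrimaryError) leave the connection healthy and are
# re-raised unchanged just below, while everything else -- socket errors, SSL
# errors, even a KeyboardInterrupt delivered inside a blocking recv -- is
# treated as fatal to this connection and funneled through
# _raise_connection_failure, which closes the socket before re-raising.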
except BaseException as error: self._raise_connection_failure(error) - def send_message(self, message, max_doc_size): + def send_message(self, message: bytes, max_doc_size: int) -> None: """Send a raw BSON message or raise ConnectionFailure. If a network exception is raised, the socket is closed. """ - if (self.max_bson_size is not None - and max_doc_size > self.max_bson_size): + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: raise DocumentTooLarge( "BSON document too large (%d bytes) - the connected server " - "supports BSON document sizes up to %d bytes." % - (max_doc_size, self.max_bson_size)) + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) try: - self.sock.sendall(message) + self.conn.sendall(message) except BaseException as error: self._raise_connection_failure(error) - def receive_message(self, request_id): + def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: """Receive a raw BSON message or raise ConnectionFailure. If any exception is raised, the socket is closed. """ try: - return receive_message(self.sock, request_id, - self.max_message_size) + return receive_message(self, request_id, self.max_message_size) except BaseException as error: self._raise_connection_failure(error) - def _raise_if_not_writable(self, unacknowledged): - """Raise NotMasterError on unacknowledged write if this socket is not + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not writable. """ if unacknowledged and not self.is_writable: - # Write won't succeed, bail as if we'd received a not master error. - raise NotMasterError("not master", { - "ok": 0, "errmsg": "not master", "code": 10107}) + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) - def legacy_write(self, request_id, msg, max_doc_size, with_last_error): - """Send OP_INSERT, etc., optionally returning response as a dict. + def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. - Can raise ConnectionFailure or OperationFailure. + Can raise ConnectionFailure or InvalidDocument. :Parameters: - - `request_id`: an int. - - `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message, - perhaps with a getlasterror command appended. + - `msg`: bytes, an OP_MSG message. - `max_doc_size`: size in bytes of the largest document in `msg`. - - `with_last_error`: True if a getlasterror command is appended. """ - self._raise_if_not_writable(not with_last_error) - + self._raise_if_not_writable(True) self.send_message(msg, max_doc_size) - if with_last_error: - reply = self.receive_message(request_id) - return helpers._check_gle_response(reply.command_response()) - def write_command(self, request_id, msg): + def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions + ) -> dict[str, Any]: """Send "insert" etc. command, returning response as a dict. Can raise ConnectionFailure or OperationFailure. @@ -684,99 +1054,97 @@ def write_command(self, request_id, msg): """ self.send_message(msg, 0) reply = self.receive_message(request_id) - result = reply.command_response() + result = reply.command_response(codec_options) - # Raises NotMasterError or OperationFailure. - helpers._check_command_response(result) + # Raises NotPrimaryError or OperationFailure. 
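# For illustration, the kind of server reply that _check_command_response
# maps to NotPrimaryError, mirroring the document _raise_if_not_writable
# fabricates above (field values are an example, not a guarantee):
#
#     {"ok": 0, "errmsg": "not primary", "code": 10107}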
+ helpers._check_command_response(result, self.max_wire_version) return result - def check_auth(self, all_credentials): - """Update this socket's authentication. - - Log in or out to bring this socket's credentials up to date with - those provided. Can raise ConnectionFailure or OperationFailure. + def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. - :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. + Can raise ConnectionFailure or OperationFailure. """ - if all_credentials or self.authset: - cached = set(itervalues(all_credentials)) - authset = self.authset.copy() - - # Logout any credentials that no longer exist in the cache. - for credentials in authset - cached: - auth.logout(credentials.source, self) - self.authset.discard(credentials) - - for credentials in cached - authset: - auth.authenticate(credentials, self) - self.authset.add(credentials) - # CMAP spec says to publish the ready event only after authenticating # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False if not self.ready: + creds = self.opts._credentials + if creds: + auth.authenticate(creds, self, reauthenticate=reauthenticate) self.ready = True if self.enabled_for_cmap: + assert self.listeners is not None self.listeners.publish_connection_ready(self.address, self.id) - def authenticate(self, credentials): - """Log in to the server and store these credentials in `authset`. - - Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `credentials`: A MongoCredential. - """ - auth.authenticate(credentials, self) - self.authset.add(credentials) - - def validate_session(self, client, session): + def validate_session( + self, client: Optional[MongoClient], session: Optional[ClientSession] + ) -> None: """Validate this session before use with client. - Raises error if this session is logged in as a different user or - the client is not the one that created the session. + Raises error if the client is not the one that created the session. """ if session: if session._client is not client: - raise InvalidOperation( - 'Can only use session with the MongoClient that' - ' started it') - if session._authset != self.authset: - raise InvalidOperation( - 'Cannot use session after authenticating with different' - ' credentials') - - def close_socket(self, reason): + raise InvalidOperation("Can only use session with the MongoClient that started it") + + def close_conn(self, reason: Optional[str]) -> None: """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + if reason and self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + + def _close_conn(self) -> None: + """Close this connection.""" if self.closed: return self.closed = True - # Avoid exceptions on interpreter shutdown. + if self.cancel_context: + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. 
try: - self.sock.close() - except Exception: + self.conn.close() + except Exception: # noqa: S110 pass - if reason and self.enabled_for_cmap: - self.listeners.publish_connection_closed( - self.address, self.id, reason) - - def send_cluster_time(self, command, session, client): - """Add cluster time for MongoDB >= 3.6.""" - if self.max_wire_version >= 6 and client: + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + return self.socket_checker.socket_closed(self.conn) + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[ClientSession], + client: Optional[MongoClient], + ) -> None: + """Add $clusterTime.""" + if client: client._send_cluster_time(command, session) - def update_last_checkin_time(self): - self.last_checkin_time = _time() + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) - def update_is_writable(self, is_writable): + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: self.is_writable = is_writable - def idle_time_seconds(self): + def idle_time_seconds(self) -> float: """Seconds since this socket was last checked into its pool.""" - return _time() - self.last_checkin_time + return time.monotonic() - self.last_checkin_time - def _raise_connection_failure(self, error): + def _raise_connection_failure(self, error: BaseException) -> NoReturn: # Catch *all* exceptions from socket methods and close the socket. In # regular Python, socket operations only raise socket.error, even if # the underlying cause was a Ctrl-C: a signal raised during socket.recv @@ -790,30 +1158,38 @@ def _raise_connection_failure(self, error): # ...) is called in Python code, which experiences the signal as a # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. - self.close_socket(ConnectionClosedReason.ERROR) - if isinstance(error, socket.error): - _raise_connection_failure(self.address, error) + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) else: raise - def __eq__(self, other): - return self.sock == other.sock + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): - return hash(self.sock) + def __hash__(self) -> int: + return hash(self.conn) - def __repr__(self): - return "SocketInfo(%s)%s at %s" % ( - repr(self.sock), + def __repr__(self) -> str: + return "Connection({}){} at {}".format( + repr(self.conn), self.closed and " CLOSED" or "", - id(self) + id(self), ) -def _create_connection(address, options): +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: """Given (host, port) and PoolOptions, connect and return a socket object. Can raise socket.error. 
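# A minimal standalone sketch (not part of this patch) of the
# connect-with-fallback pattern the _create_connection hunks below implement.
# AF_UNSPEC lets getaddrinfo yield both IPv6 and IPv4 candidates; the first
# socket that connects wins, and the last OSError is re-raised if none do.
# The function name is illustrative, not the driver's API; the real code adds
# keepalive, CSOT, and Unix domain socket handling on top.

from typing import Optional
import socket


def connect_first_viable(host: str, port: int, timeout: float) -> socket.socket:
    # Pin 'localhost' to IPv4, as the patched code does, to avoid slow
    # IPv6 resolution issues; everything else resolves dual-stack.
    family = socket.AF_INET if host == "localhost" else socket.AF_UNSPEC
    err: Optional[OSError] = None
    for af, socktype, proto, _, sa in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        sock = socket.socket(af, socktype, proto)
        try:
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(timeout)
            sock.connect(sa)
            return sock
        except OSError as exc:
            err = exc
            sock.close()
    if err is not None:
        raise err
    # getaddrinfo yielded no candidates at all (e.g. no IPv6 support for an
    # IPv6-only host), mirroring the hunk below.
    raise OSError("getaddrinfo failed")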
@@ -823,17 +1199,16 @@ def _create_connection(address, options): host, port = address # Check if dealing with a unix domain socket - if host.endswith('.sock'): + if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " - "on this system") + raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) try: sock.connect(host) return sock - except socket.error: + except OSError: sock.close() raise @@ -841,7 +1216,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != 'localhost': + if socket.has_ipv6 and host != "localhost": family = socket.AF_UNSPEC err = None @@ -851,9 +1226,8 @@ def _create_connection(address, options): # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 # all file descriptors are created non-inheritable. See PEP 446. try: - sock = socket.socket( - af, socktype | getattr(socket, 'SOCK_CLOEXEC', 0), proto) - except socket.error: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: # Can SOCK_CLOEXEC be defined even if the kernel doesn't support # it? sock = socket.socket(af, socktype, proto) @@ -861,14 +1235,18 @@ def _create_connection(address, options): _set_non_inheritable_non_atomic(sock.fileno()) try: sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(options.connect_timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, - options.socket_keepalive) - if options.socket_keepalive: - _set_keepalive_times(sock) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) sock.connect(sa) return sock - except socket.error as e: + except OSError as e: err = e sock.close() @@ -879,359 +1257,584 @@ def _create_connection(address, options): # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. - raise socket.error('getaddrinfo failed') + raise OSError("getaddrinfo failed") -_PY37PLUS = sys.version_info[:2] >= (3, 7) - - -def _configured_socket(address, options): +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: """Given (host, port) and PoolOptions, return a configured socket. - Can raise socket.error, ConnectionFailure, or CertificateError. + Can raise socket.error, ConnectionFailure, or _CertificateError. Sets socket's SSL and timeout options. """ sock = _create_connection(address, options) - ssl_context = options.ssl_context + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock - if ssl_context is not None: - host = address[0] + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = ssl_context.wrap_socket(sock) + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. 
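# With a stdlib-style SSLContext, passing server_hostname both sends SNI and,
# when check_hostname is enabled, verifies the certificate's hostname during
# the handshake itself, e.g.:
#
#     ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host)
#
# The explicit match_hostname() call further below is only a fallback for
# contexts configured with verify_mode on but check_hostname off, and is
# skipped when tlsAllowInvalidHostnames was requested.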
+ raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): try: - # According to RFC6066, section 3, IPv4 and IPv6 literals are - # not permitted for SNI hostname. - # Previous to Python 3.7 wrap_socket would blindly pass - # IP addresses as SNI hostname. - # https://bugs.python.org/issue32185 - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if _HAVE_SNI and (not is_ip_address(host) or _PY37PLUS): - sock = ssl_context.wrap_socket(sock, server_hostname=host) - else: - sock = ssl_context.wrap_socket(sock) - except _SSLCertificateError: - sock.close() - # Raise CertificateError directly like we do after match_hostname - # below. + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) + except _CertificateError: + ssl_sock.close() raise - except IOError as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - _raise_connection_failure(address, exc, "SSL handshake failed: ") - if (ssl_context.verify_mode and not - getattr(ssl_context, "check_hostname", False) and - options.ssl_match_hostname): - try: - match_hostname(sock.getpeercert(), hostname=host) - except CertificateError: - sock.close() - raise - sock.settimeout(options.socket_timeout) - return sock + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock class _PoolClosedError(PyMongoError): """Internal error raised when a thread tries to get a connection from a closed pool. """ - pass + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. 
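# defaultdict(int) means an unseen service_id implicitly starts at
# generation 0. A runnable sketch of the per-service behaviour the methods
# below build on (string keys stand in for the real ObjectId service ids):
#
#     >>> import collections
#     >>> gens = collections.defaultdict(int)
#     >>> gens["svc-a"] += 1      # reset scoped to one backend
#     >>> gens["svc-a"], gens["svc-b"]
#     (1, 0)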
+ self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 # Do *not* explicitly inherit from object or Jython won't call __del__ # http://bugs.jython.org/issue1057 class Pool: - def __init__(self, address, options, handshake=True): + def __init__(self, address: _Address, options: PoolOptions, handshake: bool = True): """ :Parameters: - `address`: a (hostname, port) tuple - `options`: a PoolOptions instance - - `handshake`: whether to call ismaster for each new SocketInfo + - `handshake`: whether to call hello for each new Connection """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY # Check a socket's health with socket_closed() every once in a while. # Can override for testing: 0 to always check, None to never check. self._check_interval_seconds = 1 # LIFO pool. Sockets are ordered on idle time. Sockets claimed # and returned to pool from the left side. Stale sockets removed # from the right side. - self.sockets = collections.deque() - self.lock = threading.Lock() + self.conns: collections.deque = collections.deque() + self.lock = _create_lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 - self.closed = False # Track whether the sockets in this pool are writeable or not. - self.is_writable = None + self.is_writable: Optional[bool] = None # Keep track of resets, so we notice sockets created before the most # recent reset and close them. - self.pool_id = 0 + # self.generation = 0 + self.gen = _PoolGeneration() self.pid = os.getpid() self.address = address self.opts = options self.handshake = handshake # Don't publish events in Monitor pools. self.enabled_for_cmap = ( - self.handshake and - self.opts.event_listeners is not None and - self.opts.event_listeners.enabled_for_cmap) - - if (self.opts.wait_queue_multiple is None or - self.opts.max_pool_size is None): - max_waiters = None - else: - max_waiters = ( - self.opts.max_pool_size * self.opts.wait_queue_multiple) + self.handshake + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) - self._socket_semaphore = thread_util.create_semaphore( - self.opts.max_pool_size, max_waiters) - self.socket_checker = SocketChecker() + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = threading.Condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. 
+ # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = threading.Condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 if self.enabled_for_cmap: - self.opts.event_listeners.publish_pool_created( - self.address, self.opts.non_default_options) - - def _reset(self, close): + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[Connection] = set() + self.ncursors = 0 + self.ntxns = 0 + + def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + def _reset( + self, close: bool, pause: bool = True, service_id: Optional[ObjectId] = None + ) -> None: + old_state = self.state + with self.size_cond: if self.closed: return - self.pool_id += 1 - self.pid = os.getpid() - sockets, self.sockets = self.sockets, collections.deque() - self.active_sockets = 0 + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() + keep: collections.deque = collections.deque() + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + if close: - self.closed = True + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners # CMAP spec says that close() MUST close sockets before publishing the # PoolClosedEvent but that reset() SHOULD close sockets *after* # publishing the PoolClearedEvent. if close: - for sock_info in sockets: - sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) if self.enabled_for_cmap: + assert listeners is not None listeners.publish_pool_closed(self.address) else: - if self.enabled_for_cmap: - listeners.publish_pool_cleared(self.address) - for sock_info in sockets: - sock_info.close_socket(ConnectionClosedReason.STALE) + if old_state != PoolState.PAUSED and self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared(self.address, service_id=service_id) + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) - def update_is_writable(self, is_writable): + def update_is_writable(self, is_writable: Optional[bool]) -> None: """Updates the is_writable attribute on all sockets currently in the Pool. 
""" self.is_writable = is_writable with self.lock: - for socket in self.sockets: - socket.update_is_writable(self.is_writable) + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) - def reset(self): - self._reset(close=False) + def reset(self, service_id: Optional[ObjectId] = None) -> None: + self._reset(close=False, service_id=service_id) - def close(self): + def reset_without_pause(self) -> None: + self._reset(close=False, pause=False) + + def close(self) -> None: self._reset(close=True) - def remove_stale_sockets(self, reference_pool_id): + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + def remove_stale_sockets(self, reference_generation: int) -> None: """Removes stale sockets then adds new ones if pool is too small and - has not been reset. The `reference_pool_id` argument specifies the - `pool_id` at the point in time this operation was requested on the + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the pool. """ + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + return + if self.opts.max_idle_time_seconds is not None: with self.lock: - while (self.sockets and - self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): - sock_info = self.sockets.pop() - sock_info.close_socket(ConnectionClosedReason.IDLE) + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + conn = self.conns.pop() + conn.close_conn(ConnectionClosedReason.IDLE) while True: - with self.lock: - if (len(self.sockets) + self.active_sockets >= - self.opts.min_pool_size): - # There are enough sockets in the pool. - break - - # We must acquire the semaphore to respect max_pool_size. - if not self._socket_semaphore.acquire(False): - break + with self.size_cond: + # There are enough sockets in the pool. + if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False try: - sock_info = self.connect() + with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = self.connect() with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. - if self.pool_id != reference_pool_id: - sock_info.close_socket(ConnectionClosedReason.STALE) - break - self.sockets.appendleft(sock_info) + if self.gen.get_overall() != reference_generation: + conn.close_conn(ConnectionClosedReason.STALE) + return + self.conns.appendleft(conn) finally: - self._socket_semaphore.release() + if incremented: + # Notify after adding the socket to the pool. + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() - def connect(self): - """Connect to Mongo and return a new SocketInfo. + with self.size_cond: + self.requests -= 1 + self.size_cond.notify() - Can raise ConnectionFailure or CertificateError. + def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Connect to Mongo and return a new Connection. + + Can raise ConnectionFailure. 
Note that the pool does not keep a reference to the socket -- you - must call return_socket() when you're done with it. + must call checkin() when you're done with it. """ with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: + assert listeners is not None listeners.publish_connection_created(self.address, conn_id) - sock = None try: sock = _configured_socket(self.address, self.opts) - except socket.error as error: - if sock is not None: - sock.close() - + except BaseException as error: if self.enabled_for_cmap: + assert listeners is not None listeners.publish_connection_closed( - self.address, conn_id, ConnectionClosedReason.ERROR) + self.address, conn_id, ConnectionClosedReason.ERROR + ) - _raise_connection_failure(self.address, error) + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) - sock_info = SocketInfo(sock, self, self.address, conn_id) - if self.handshake: - sock_info.ismaster(self.opts.metadata, None) - self.is_writable = sock_info.is_writable + raise - return sock_info + conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] + try: + if self.handshake: + conn.hello() + self.is_writable = conn.is_writable + if handler: + handler.contribute_socket(conn, completed_handshake=False) + + conn.authenticate() + except BaseException: + conn.close_conn(ConnectionClosedReason.ERROR) + raise + + return conn @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False): - """Get a socket from the pool. Use with a "with" statement. + def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterator[Connection]: + """Get a connection from the pool. Use with a "with" statement. - Returns a :class:`SocketInfo` object wrapping a connected + Returns a :class:`Connection` object wrapping a connected :class:`socket.socket`. This method should always be used in a with-statement:: - with pool.get_socket(credentials, checkout) as socket_info: - socket_info.send_message(msg) - data = socket_info.receive_message(op_code, request_id) - - The socket is logged in or out as needed to match ``all_credentials`` - using the correct authentication mechanism for the server's wire - protocol version. + with pool.checkout() as connection: + connection.send_message(msg) + data = connection.receive_message(op_code, request_id) Can raise ConnectionFailure or OperationFailure. :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. - - `checkout` (optional): keep socket checked out. + - `handler` (optional): A _MongoClientErrorHandler. """ - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: + assert listeners is not None listeners.publish_connection_check_out_started(self.address) - # First get a socket, then attempt authentication. Simplifies - # semaphore management in the face of network errors during auth.
- sock_info = self._get_socket_no_auth() - checked_auth = False + + conn = self._get_conn(handler=handler) + + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_out(self.address, conn.id) try: - sock_info.check_auth(all_credentials) - checked_auth = True - if self.enabled_for_cmap: - listeners.publish_connection_checked_out( - self.address, sock_info.id) - yield sock_info - except: - # Exception in caller. Decrement semaphore. - self.return_socket(sock_info, publish_checkin=checked_auth) - if self.enabled_for_cmap and not checked_auth: - self.opts.event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + yield conn + except BaseException: + # Exception in caller. Ensure the connection gets returned. + # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + handler.handle(exc_type, exc_val) + if not pinned and conn.active: + self.checkin(conn) raise - else: - if not checkout: - self.return_socket(sock_info) - - def _get_socket_no_auth(self): - """Get or create a SocketInfo. Can raise ConnectionFailure.""" + if conn.pinned_txn: + with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + self.checkin(conn) + + def _raise_if_not_ready(self, emit_event: bool) -> None: + if self.state != PoolState.READY: + if self.enabled_for_cmap and emit_event: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + def _get_conn(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Get or create a Connection. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of # what could go wrong otherwise if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() if self.closed: if self.enabled_for_cmap: - self.opts.event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED) + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED + ) raise _PoolClosedError( - 'Attempted to check out a connection from closed connection ' - 'pool') + "Attempted to check out a connection from closed connection pool" + ) - # Get a free socket or create one. - if not self._socket_semaphore.acquire( - True, self.opts.wait_queue_timeout): - self._raise_wait_queue_timeout() with self.lock: - self.active_sockets += 1 + self.operation_count += 1 + + # Get a free socket or create one. 
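`checkout()` above is a context manager with an asymmetric error contract: on success a pinned connection stays checked out (the transaction or cursor owns it), while on failure the SDAM handler runs first and the connection is returned unless pinned. A toy model of that contract; `remember_pinned` is a hypothetical helper standing in for the `__pinned_sockets` bookkeeping:

```python
import contextlib


@contextlib.contextmanager
def borrowed(pool):
    # Toy model only: `pool` is assumed to look like the Pool above.
    conn = pool._get_conn()
    try:
        yield conn
    except BaseException:
        # Error path: SDAM handling would run here, then the connection
        # is returned unless the session has pinned it.
        if not (conn.pinned_txn or conn.pinned_cursor) and conn.active:
            pool.checkin(conn)
        raise
    # Success path: pinned connections stay out; the cursor or
    # transaction that owns them checks them in later.
    if conn.pinned_txn or conn.pinned_cursor:
        pool.remember_pinned(conn)  # hypothetical stand-in for __pinned_sockets
    elif conn.active:
        pool.checkin(conn)
```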
+ if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + with self.size_cond: + self._raise_if_not_ready(emit_event=True) + while not (self.requests < self.max_pool_size): + if not _cond_wait(self.size_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout() + self._raise_if_not_ready(emit_event=True) + self.requests += 1 # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False try: - sock_info = None - while sock_info is None: - try: - with self.lock: - sock_info = self.sockets.popleft() - except IndexError: - # Can raise ConnectionFailure or CertificateError. - sock_info = self.connect() - else: - if self._perished(sock_info): - sock_info = None - except Exception: - self._socket_semaphore.release() with self.lock: - self.active_sockets -= 1 - - if self.enabled_for_cmap: - self.opts.event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + self.active_sockets += 1 + incremented = True + + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + with self._max_connecting_cond: + self._raise_if_not_ready(emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + if not _cond_wait(self._max_connecting_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout() + self._raise_if_not_ready(emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = self.connect(handler=handler) + finally: + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + except BaseException: + if conn: + # We checked out a socket but authentication failed. + conn.close_conn(ConnectionClosedReason.ERROR) + with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if self.enabled_for_cmap and not emitted_event: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) raise - return sock_info + conn.active = True + return conn - def return_socket(self, sock_info, publish_checkin=True): - """Return the socket to the pool, or if it's closed discard it. + def checkin(self, conn: Connection) -> None: + """Return the connection to the pool, or if it's closed discard it. :Parameters: - - `sock_info`: The socket to check into the pool. - - `publish_checkin`: If False, a ConnectionCheckedInEvent will not - be published. + - `conn`: The connection to check into the pool. 
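`_get_conn()` above waits on two condition variables against a single absolute deadline (the CSOT deadline or waitQueueTimeoutMS). The `_cond_wait` helper it calls is defined elsewhere in pool.py; a plausible shape, assuming it returns ``False`` on timeout like `threading.Condition.wait`:

```python
import threading
import time
from typing import Optional


def _cond_wait(condition: threading.Condition, deadline: Optional[float]) -> bool:
    # Block until notified or until the absolute monotonic deadline
    # passes; False means the wait timed out.
    timeout = None if deadline is None else deadline - time.monotonic()
    return condition.wait(timeout)
```

Because a waiter that times out may still have consumed a `notify()` meant for another thread, both wait loops above re-notify when capacity is actually available before raising the timeout.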
""" - listeners = self.opts.event_listeners - if self.enabled_for_cmap and publish_checkin: - listeners.publish_connection_checked_in(self.address, sock_info.id) + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() else: if self.closed: - sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - elif sock_info.pool_id != self.pool_id: - sock_info.close_socket(ConnectionClosedReason.STALE) - elif not sock_info.closed: - sock_info.update_last_checkin_time() - sock_info.update_is_writable(self.is_writable) + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + else: with self.lock: - self.sockets.appendleft(sock_info) - - self._socket_semaphore.release() - with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + + with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + def _perished(self, conn: Connection) -> bool: + """Return True and close the connection if it is "perished". - def _perished(self, sock_info): - """This side-effecty function checks if this socket has been idle for + This side-effecty function checks if this socket has been idle for for longer than the max idle time, or if the socket has been closed by - some external network error. + some external network error, or if the socket's generation is outdated. Checking sockets lets us avoid seeing *some* :class:`~pymongo.errors.AutoReconnect` exceptions on server @@ -1240,35 +1843,58 @@ def _perished(self, sock_info): pool, to keep performance reasonable - we can't avoid AutoReconnects completely anyway. """ - idle_time_seconds = sock_info.idle_time_seconds() + idle_time_seconds = conn.idle_time_seconds() # If socket is idle, open a new one. 
- if (self.opts.max_idle_time_seconds is not None and - idle_time_seconds > self.opts.max_idle_time_seconds): - sock_info.close_socket(ConnectionClosedReason.IDLE) + if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + conn.close_conn(ConnectionClosedReason.IDLE) return True - if (self._check_interval_seconds is not None and ( - 0 == self._check_interval_seconds or - idle_time_seconds > self._check_interval_seconds)): - if self.socket_checker.socket_closed(sock_info.sock): - sock_info.close_socket(ConnectionClosedReason.ERROR) + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + conn.close_conn(ConnectionClosedReason.ERROR) return True + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + return True + return False - def _raise_wait_queue_timeout(self): - listeners = self.opts.event_listeners + def _raise_wait_queue_timeout(self) -> NoReturn: + listeners = self.opts._event_listeners if self.enabled_for_cmap: + assert listeners is not None listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT) - raise ConnectionFailure( - 'Timed out while checking out a connection from connection pool ' - 'with max_size %r and wait_queue_timeout %r' % ( - self.opts.max_pool_size, self.opts.wait_queue_timeout)) + self.address, ConnectionCheckOutFailedReason.TIMEOUT + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) - def __del__(self): + def __del__(self) -> None: # Avoid ResourceWarnings in Python 3 # Close all sockets without calling reset() or close() because it is # not safe to acquire a lock in __del__. - for sock_info in self.sockets: - sock_info.close_socket(None) + for conn in self.conns: + conn.close_conn(None) diff --git a/pymongo/py.typed b/pymongo/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/pymongo/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py new file mode 100644 index 0000000000..6657937e99 --- /dev/null +++ b/pymongo/pyopenssl_context.py @@ -0,0 +1,414 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +"""A CPython compatible SSLContext implementation wrapping PyOpenSSL's +context. +""" +from __future__ import annotations + +import socket as _socket +import ssl as _stdlibssl +import sys as _sys +import time as _time +from errno import EINTR as _EINTR +from ipaddress import ip_address as _ip_address +from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union + +from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate +from OpenSSL import SSL as _SSL +from OpenSSL import crypto as _crypto +from service_identity import CertificateError as _SICertificateError +from service_identity import VerificationError as _SIVerificationError +from service_identity.pyopenssl import verify_hostname as _verify_hostname +from service_identity.pyopenssl import verify_ip_address as _verify_ip_address + +from pymongo.errors import ConfigurationError as _ConfigurationError +from pymongo.errors import _CertificateError +from pymongo.ocsp_cache import _OCSPCache +from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback +from pymongo.socket_checker import SocketChecker as _SocketChecker +from pymongo.socket_checker import _errno_from_exception +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from ssl import VerifyMode + + from cryptography.x509 import Certificate + +_T = TypeVar("_T") + +try: + import certifi + + _HAVE_CERTIFI = True +except ImportError: + _HAVE_CERTIFI = False + +PROTOCOL_SSLv23 = _SSL.SSLv23_METHOD +# Always available +OP_NO_SSLv2 = _SSL.OP_NO_SSLv2 +OP_NO_SSLv3 = _SSL.OP_NO_SSLv3 +OP_NO_COMPRESSION = _SSL.OP_NO_COMPRESSION +# This isn't currently documented for PyOpenSSL +OP_NO_RENEGOTIATION = getattr(_SSL, "OP_NO_RENEGOTIATION", 0) + +# Always available +HAS_SNI = True +IS_PYOPENSSL = True + +# Base Exception class +SSLError = _SSL.Error + +# https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L2995-L3002 +_VERIFY_MAP = { + _stdlibssl.CERT_NONE: _SSL.VERIFY_NONE, + _stdlibssl.CERT_OPTIONAL: _SSL.VERIFY_PEER, + _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, +} + +_REVERSE_VERIFY_MAP = {value: key for key, value in _VERIFY_MAP.items()} + + +# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are +# not permitted for SNI hostname. +def _is_ip_address(address: Any) -> bool: + try: + _ip_address(address) + return True + except (ValueError, UnicodeError): + return False + + +# According to the docs for socket.send it can raise +# WantX509LookupError and should be retried. 
+BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) + + +def _ragged_eof(exc: BaseException) -> bool: + """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" + return exc.args == (-1, "Unexpected EOF") + + +# https://github.com/pyca/pyopenssl/issues/168 +# https://github.com/pyca/pyopenssl/issues/176 +# https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets +class _sslConn(_SSL.Connection): + def __init__( + self, ctx: _SSL.Context, sock: Optional[_socket.socket], suppress_ragged_eofs: bool + ): + self.socket_checker = _SocketChecker() + self.suppress_ragged_eofs = suppress_ragged_eofs + super().__init__(ctx, sock) + + def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: + timeout = self.gettimeout() + if timeout: + start = _time.monotonic() + while True: + try: + return call(*args, **kwargs) + except BLOCKING_IO_ERRORS as exc: + # Check for closed socket. + if self.fileno() == -1: + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") from None + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, _SSL.WantReadError): + want_read = True + want_write = False + elif isinstance(exc, _SSL.WantWriteError): + want_read = False + want_write = True + else: + want_read = True + want_write = True + self.socket_checker.select(self, want_read, want_write, timeout) + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") from None + continue + + def do_handshake(self, *args: Any, **kwargs: Any) -> None: + return self._call(super().do_handshake, *args, **kwargs) + + def recv(self, *args: Any, **kwargs: Any) -> bytes: + try: + return self._call(super().recv, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return b"" + raise + + def recv_into(self, *args: Any, **kwargs: Any) -> int: + try: + return self._call(super().recv_into, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return 0 + raise + + def sendall(self, buf: bytes, flags: int = 0) -> None: # type: ignore[override] + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + while total_sent < total_length: + try: + sent = self._call(super().send, view[total_sent:], flags) + # XXX: It's not clear if this can actually happen. PyOpenSSL + # doesn't appear to have any interrupt handling, nor any interrupt + # errors for OpenSSL connections. + except OSError as exc: + if _errno_from_exception(exc) == _EINTR: + continue + raise + # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 + # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html + if sent <= 0: + raise OSError("connection closed") + total_sent += sent + + +class _CallbackData: + """Data class which is passed to the OCSP callback.""" + + def __init__(self) -> None: + self.trusted_ca_certs: Optional[list[Certificate]] = None + self.check_ocsp_endpoint: Optional[bool] = None + self.ocsp_response_cache = _OCSPCache() + + +class SSLContext: + """A CPython compatible SSLContext implementation wrapping PyOpenSSL's + context. 
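`_sslConn._call()` above adapts blocking-style callers to PyOpenSSL's non-blocking `WantReadError`/`WantWriteError` signals. A simplified model of the same retry loop, enforcing one overall deadline and multiplexing with `select()` on the connection's `fileno()`:

```python
import select
import socket
import time

from OpenSSL import SSL


def call_retrying(conn, op, timeout, *args):
    """Sketch of the retry loop: `conn` is an SSL.Connection and `op` is
    a bound method on it, e.g. conn.do_handshake."""
    start = time.monotonic()
    while True:
        try:
            return op(*args)
        except (SSL.WantReadError, SSL.WantWriteError) as exc:
            remaining = timeout - (time.monotonic() - start)
            if remaining <= 0:
                raise socket.timeout("timed out") from None
            want_read = isinstance(exc, SSL.WantReadError)
            # Wait until the socket is ready in the needed direction.
            select.select(
                [conn] if want_read else [],
                [] if want_read else [conn],
                [],
                remaining,
            )
```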
+ """ + + __slots__ = ("_protocol", "_ctx", "_callback_data", "_check_hostname") + + def __init__(self, protocol: int): + self._protocol = protocol + self._ctx = _SSL.Context(self._protocol) + self._callback_data = _CallbackData() + self._check_hostname = True + # OCSP + # XXX: Find a better place to do this someday, since this is client + # side configuration and wrap_socket tries to support both client and + # server side sockets. + self._callback_data.check_ocsp_endpoint = True + self._ctx.set_ocsp_client_callback(callback=_ocsp_callback, data=self._callback_data) + + @property + def protocol(self) -> int: + """The protocol version chosen when constructing the context. + This attribute is read-only. + """ + return self._protocol + + def __get_verify_mode(self) -> VerifyMode: + """Whether to try to verify other peers' certificates and how to + behave if verification fails. This attribute must be one of + ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. + """ + return _REVERSE_VERIFY_MAP[self._ctx.get_verify_mode()] + + def __set_verify_mode(self, value: VerifyMode) -> None: + """Setter for verify_mode.""" + + def _cb( + _connobj: _SSL.Connection, + _x509obj: _crypto.X509, + _errnum: int, + _errdepth: int, + retcode: int, + ) -> bool: + # It seems we don't need to do anything here. Twisted doesn't, + # and OpenSSL's SSL_CTX_set_verify let's you pass NULL + # for the callback option. It's weird that PyOpenSSL requires + # this. + # This is optional in pyopenssl >= 20 and can be removed once minimum + # supported version is bumped + # See: pyopenssl.org/en/latest/changelog.html#id47 + return bool(retcode) + + self._ctx.set_verify(_VERIFY_MAP[value], _cb) + + verify_mode = property(__get_verify_mode, __set_verify_mode) + + def __get_check_hostname(self) -> bool: + return self._check_hostname + + def __set_check_hostname(self, value: Any) -> None: + validate_boolean("check_hostname", value) + self._check_hostname = value + + check_hostname = property(__get_check_hostname, __set_check_hostname) + + def __get_check_ocsp_endpoint(self) -> Optional[bool]: + return self._callback_data.check_ocsp_endpoint + + def __set_check_ocsp_endpoint(self, value: bool) -> None: + validate_boolean("check_ocsp", value) + self._callback_data.check_ocsp_endpoint = value + + check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) + + def __get_options(self) -> None: + # Calling set_options adds the option to the existing bitmask and + # returns the new bitmask. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_options + return self._ctx.set_options(0) + + def __set_options(self, value: int) -> None: + # Explcitly convert to int, since newer CPython versions + # use enum.IntFlag for options. The values are the same + # regardless of implementation. + self._ctx.set_options(int(value)) + + options = property(__get_options, __set_options) + + def load_cert_chain( + self, + certfile: Union[str, bytes], + keyfile: Union[str, bytes, None] = None, + password: Optional[str] = None, + ) -> None: + """Load a private key and the corresponding certificate. The certfile + string must be the path to a single file in PEM format containing the + certificate as well as any number of CA certificates needed to + establish the certificate's authenticity. The keyfile string, if + present, must point to a file containing the private key. Otherwise + the private key will be taken from certfile as well. 
+ """ + # Match CPython behavior + # https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L3930-L3971 + # Password callback MUST be set first or it will be ignored. + if password: + + def _pwcb(_max_length: int, _prompt_twice: bool, _user_data: bytes) -> bytes: + # XXX:We could check the password length against what OpenSSL + # tells us is the max, but we can't raise an exception, so... + # warn? + assert password is not None + return password.encode("utf-8") + + self._ctx.set_passwd_cb(_pwcb) + self._ctx.use_certificate_chain_file(certfile) + self._ctx.use_privatekey_file(keyfile or certfile) + self._ctx.check_privatekey() + + def load_verify_locations( + self, cafile: Optional[str] = None, capath: Optional[str] = None + ) -> None: + """Load a set of "certification authority"(CA) certificates used to + validate other peers' certificates when `~verify_mode` is other than + ssl.CERT_NONE. + """ + self._ctx.load_verify_locations(cafile, capath) + # Manually load the CA certs when get_verified_chain is not available (pyopenssl<20). + if not hasattr(_SSL.Connection, "get_verified_chain"): + assert cafile is not None + self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) + + def _load_certifi(self) -> None: + """Attempt to load CA certs from certifi.""" + if _HAVE_CERTIFI: + self.load_verify_locations(certifi.where()) + else: + raise _ConfigurationError( + "tlsAllowInvalidCertificates is False but no system " + "CA certificates could be loaded. Please install the " + "certifi package, or provide a path to a CA file using " + "the tlsCAFile option" + ) + + def _load_wincerts(self, store: str) -> None: + """Attempt to load CA certs from Windows trust store.""" + cert_store = self._ctx.get_cert_store() + oid = _stdlibssl.Purpose.SERVER_AUTH.oid + for cert, encoding, trust in _stdlibssl.enum_certificates(store): # type: ignore + if encoding == "x509_asn": + if trust is True or oid in trust: + cert_store.add_cert( + _crypto.X509.from_cryptography(_load_der_x509_certificate(cert)) + ) + + def load_default_certs(self) -> None: + """A PyOpenSSL version of load_default_certs from CPython.""" + # PyOpenSSL is incapable of loading CA certs from Windows, and mostly + # incapable on macOS. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths + if _sys.platform == "win32": + try: + for storename in ("CA", "ROOT"): + self._load_wincerts(storename) + except PermissionError: + # Fall back to certifi + self._load_certifi() + elif _sys.platform == "darwin": + self._load_certifi() + self._ctx.set_default_verify_paths() + + def set_default_verify_paths(self) -> None: + """Specify that the platform provided CA certificates are to be used + for verification purposes. + """ + # Note: See PyOpenSSL's docs for limitations, which are similar + # but not that same as CPython's. + self._ctx.set_default_verify_paths() + + def wrap_socket( + self, + sock: _socket.socket, + server_side: bool = False, + do_handshake_on_connect: bool = True, + suppress_ragged_eofs: bool = True, + server_hostname: Optional[str] = None, + session: Optional[_SSL.Session] = None, + ) -> _sslConn: + """Wrap an existing Python socket connection and return a TLS socket + object. 
+ """ + ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs) + if session: + ssl_conn.set_session(session) + if server_side is True: + ssl_conn.set_accept_state() + else: + # SNI + if server_hostname and not _is_ip_address(server_hostname): + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + ssl_conn.set_tlsext_host_name(server_hostname.encode("idna")) + if self.verify_mode != _stdlibssl.CERT_NONE: + # Request a stapled OCSP response. + ssl_conn.request_ocsp() + ssl_conn.set_connect_state() + # If this wasn't true the caller of wrap_socket would call + # do_handshake() + if do_handshake_on_connect: + # XXX: If we do hostname checking in a callback we can get rid + # of this call to do_handshake() since the handshake + # will happen automatically later. + ssl_conn.do_handshake() + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + if self.check_hostname and server_hostname is not None: + try: + if _is_ip_address(server_hostname): + _verify_ip_address(ssl_conn, server_hostname) + else: + _verify_hostname(ssl_conn, server_hostname) + except (_SICertificateError, _SIVerificationError) as exc: + raise _CertificateError(str(exc)) from None + return ssl_conn diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 3ba8c854a5..0b54ee86f7 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -13,11 +13,12 @@ # limitations under the License. """Tools for working with read concerns.""" +from __future__ import annotations -from bson.py3compat import string_type +from typing import Any, Optional -class ReadConcern(object): +class ReadConcern: """ReadConcern :Parameters: @@ -31,26 +32,26 @@ class ReadConcern(object): """ - def __init__(self, level=None): - if level is None or isinstance(level, string_type): + def __init__(self, level: Optional[str] = None) -> None: + if level is None or isinstance(level, str): self.__level = level else: - raise TypeError( - 'level must be a string or None.') + raise TypeError("level must be a string or None.") @property - def level(self): + def level(self) -> Optional[str]: """The read concern level.""" return self.__level @property - def ok_for_legacy(self): + def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with - old wire protocol versions.""" - return self.level is None or self.level == 'local' + old wire protocol versions. + """ + return self.level is None or self.level == "local" @property - def document(self): + def document(self) -> dict[str, Any]: """The document representation of this read concern. .. 
note:: @@ -59,18 +60,18 @@ def document(self): """ doc = {} if self.__level: - doc['level'] = self.level + doc["level"] = self.level return doc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ReadConcern): return self.document == other.document return NotImplemented - def __repr__(self): + def __repr__(self) -> str: if self.level: - return 'ReadConcern(%s)' % self.level - return 'ReadConcern()' + return "ReadConcern(%s)" % self.level + return "ReadConcern()" DEFAULT_READ_CONCERN = ReadConcern() diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index f4425acaa6..986cc772bf 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -14,12 +14,21 @@ """Utilities for choosing which member of a replica set to read from.""" -from bson.py3compat import abc, integer_types +from __future__ import annotations + +from collections import abc +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError -from pymongo.server_selectors import (member_with_tags_server_selector, - secondary_with_tags_server_selector) +from pymongo.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) +if TYPE_CHECKING: + from pymongo.server_selectors import Selection + from pymongo.topology_description import TopologyDescription _PRIMARY = 0 _PRIMARY_PREFERRED = 1 @@ -29,50 +38,51 @@ _MONGOS_MODES = ( - 'primary', - 'primaryPreferred', - 'secondary', - 'secondaryPreferred', - 'nearest', + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", ) +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] -def _validate_tag_sets(tag_sets): - """Validate tag sets for a MongoReplicaSetClient. - """ + +def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: + """Validate tag sets for a MongoClient.""" if tag_sets is None: return tag_sets - if not isinstance(tag_sets, list): - raise TypeError(( - "Tag sets %r invalid, must be a list") % (tag_sets,)) + if not isinstance(tag_sets, (list, tuple)): + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") if len(tag_sets) == 0: - raise ValueError(( - "Tag sets %r invalid, must be None or contain at least one set of" - " tags") % (tag_sets,)) + raise ValueError( + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" + ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( - "Tag set %r invalid, must be an instance of dict, " + f"Tag set {tags!r} invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,)) + "collection.Mapping" + ) - return tag_sets + return list(tag_sets) -def _invalid_max_staleness_msg(max_staleness): - return ("maxStalenessSeconds must be a positive integer, not %s" % - max_staleness) +def _invalid_max_staleness_msg(max_staleness: Any) -> str: + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness # Some duplication with common.py to avoid import cycle. 
-def _validate_max_staleness(max_staleness): +def _validate_max_staleness(max_staleness: Any) -> int: """Validate max_staleness.""" if max_staleness == -1: return -1 - if not isinstance(max_staleness, integer_types): + if not isinstance(max_staleness, int): raise TypeError(_invalid_max_staleness_msg(max_staleness)) if max_staleness <= 0: @@ -81,71 +91,118 @@ def _validate_max_staleness(max_staleness): return max_staleness -class _ServerMode(object): - """Base class for all read preferences. - """ +def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: + """Validate hedge.""" + if hedge is None: + return None + + if not isinstance(hedge, dict): + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") - __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness") + return hedge - def __init__(self, mode, tag_sets=None, max_staleness=-1): + +class _ServerMode: + """Base class for all read preferences.""" + + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") + + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) self.__max_staleness = _validate_max_staleness(max_staleness) + self.__hedge = _validate_hedge(hedge) @property - def name(self): - """The name of this read preference. - """ + def name(self) -> str: + """The name of this read preference.""" return self.__class__.__name__ @property - def mongos_mode(self): - """The mongos mode of this read preference. - """ + def mongos_mode(self) -> str: + """The mongos mode of this read preference.""" return self.__mongos_mode @property - def document(self): - """Read preference as a document. - """ - doc = {'mode': self.__mongos_mode} + def document(self) -> dict[str, Any]: + """Read preference as a document.""" + doc: dict[str, Any] = {"mode": self.__mongos_mode} if self.__tag_sets not in (None, [{}]): - doc['tags'] = self.__tag_sets + doc["tags"] = self.__tag_sets if self.__max_staleness != -1: - doc['maxStalenessSeconds'] = self.__max_staleness + doc["maxStalenessSeconds"] = self.__max_staleness + if self.__hedge not in (None, {}): + doc["hedge"] = self.__hedge return doc @property - def mode(self): - """The mode of this read preference instance. - """ + def mode(self) -> int: + """The mode of this read preference instance.""" return self.__mode @property - def tag_sets(self): + def tag_sets(self) -> _TagSets: """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value ``"ny"``. To specify a priority-order for tag sets, provide a list of tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag set, ``{}``, means "read from any member that matches the mode, - ignoring tags." MongoReplicaSetClient tries each set of tags in turn + ignoring tags." MongoClient tries each set of tags in turn until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) .. 
seealso:: `Data-Center Awareness - `_ + `_ """ return list(self.__tag_sets) if self.__tag_sets else [{}] @property - def max_staleness(self): + def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will - no longer be selected for operations, or -1 for no maximum.""" + no longer be selected for operations, or -1 for no maximum. + """ return self.__max_staleness @property - def min_wire_version(self): + def hedge(self) -> Optional[_Hedge]: + """The read preference ``hedge`` parameter. + + A dictionary that configures how the server will perform hedged reads. + It consists of the following keys: + + - ``enabled``: Enables or disables hedged reads in sharded clusters. + + Hedged reads are automatically enabled in MongoDB 4.4+ when using a + ``nearest`` read preference. To explicitly enable hedged reads, set + the ``enabled`` key to ``true``:: + + >>> Nearest(hedge={'enabled': True}) + + To explicitly disable hedged reads, set the ``enabled`` key to + ``False``:: + + >>> Nearest(hedge={'enabled': False}) + + .. versionadded:: 3.11 + """ + return self.__hedge + + @property + def min_wire_version(self) -> int: """The wire protocol version the server must support. Some read preferences impose version requirements on all servers (e.g. @@ -157,35 +214,49 @@ def min_wire_version(self): """ return 0 if self.__max_staleness == -1 else 5 - def __repr__(self): - return "%s(tag_sets=%r, max_staleness=%r)" % ( - self.name, self.__tag_sets, self.__max_staleness) + def __repr__(self) -> str: + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( + self.name, + self.__tag_sets, + self.__max_staleness, + self.__hedge, + ) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): - return (self.mode == other.mode and - self.tag_sets == other.tag_sets and - self.max_staleness == other.max_staleness) + return ( + self.mode == other.mode + and self.tag_sets == other.tag_sets + and self.max_staleness == other.max_staleness + and self.hedge == other.hedge + ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __getstate__(self): + def __getstate__(self) -> dict[str, Any]: """Return value of object for pickling. Needed explicitly because __slots__() defined. 
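Since `_ServerMode` is slotted, the `__getstate__`/`__setstate__` pair above is what lets read preferences, hedge included, survive pickling. For example:

```python
import pickle

from pymongo.read_preferences import Nearest

pref = Nearest(tag_sets=[{"dc": "ny"}], max_staleness=90, hedge={"enabled": True})
clone = pickle.loads(pickle.dumps(pref))
assert clone == pref
assert clone.document == {
    "mode": "nearest",
    "tags": [{"dc": "ny"}],
    "maxStalenessSeconds": 90,
    "hedge": {"enabled": True},
}
```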
""" - return {'mode': self.__mode, - 'tag_sets': self.__tag_sets, - 'max_staleness': self.__max_staleness} - - def __setstate__(self, value): + return { + "mode": self.__mode, + "tag_sets": self.__tag_sets, + "max_staleness": self.__max_staleness, + "hedge": self.__hedge, + } + + def __setstate__(self, value: Mapping[str, Any]) -> None: """Restore from pickling.""" - self.__mode = value['mode'] + self.__mode = value["mode"] self.__mongos_mode = _MONGOS_MODES[self.__mode] - self.__tag_sets = _validate_tag_sets(value['tag_sets']) - self.__max_staleness = _validate_max_staleness(value['max_staleness']) + self.__tag_sets = _validate_tag_sets(value["tag_sets"]) + self.__max_staleness = _validate_max_staleness(value["max_staleness"]) + self.__hedge = _validate_hedge(value["hedge"]) + + def __call__(self, selection: Selection) -> Selection: + return selection class Primary(_ServerMode): @@ -200,17 +271,17 @@ class Primary(_ServerMode): __slots__ = () - def __init__(self): - super(Primary, self).__init__(_PRIMARY) + def __init__(self) -> None: + super().__init__(_PRIMARY) - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to a Selection.""" return selection.primary_selection - def __repr__(self): + def __repr__(self) -> str: return "Primary()" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): return other.mode == _PRIMARY return NotImplemented @@ -226,6 +297,10 @@ class PrimaryPreferred(_ServerMode): * When connected to a replica set queries are sent to the primary if available, otherwise a secondary. + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to an available secondary until the + primary of the replica set is discovered. + :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use if the primary is not available. @@ -234,24 +309,30 @@ class PrimaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` to use if the primary is not available. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, - tag_sets, - max_staleness) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" if selection.primary: return selection.primary_selection else: return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class Secondary(_ServerMode): @@ -271,19 +352,27 @@ class Secondary(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. 
""" __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class SecondaryPreferred(_ServerMode): @@ -296,6 +385,10 @@ class SecondaryPreferred(_ServerMode): * When connected to a replica set queries are distributed among secondaries, or the primary if no secondary is available. + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to the primary of the replica set until + an available secondary is discovered. + :Parameters: - `tag_sets`: The :attr:`~tag_sets` for this read preference. - `max_staleness`: (integer, in seconds) The maximum estimated @@ -303,21 +396,27 @@ class SecondaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(SecondaryPreferred, self).__init__(_SECONDARY_PREFERRED, - tag_sets, - max_staleness) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) if secondaries: return secondaries @@ -342,48 +441,102 @@ class Nearest(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" return member_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class _AggWritePref: + """Agg $out/$merge write preference. + + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. 
+ + :Parameters: + - `pref`: The read preference to use on MongoDB 5.0+. + """ + __slots__ = ("pref", "effective_pref") -_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, Nearest) + def __init__(self, pref: _ServerMode): + self.pref = pref + self.effective_pref: _ServerMode = ReadPreference.PRIMARY + def selection_hook(self, topology_description: TopologyDescription) -> None: + common_wv = topology_description.common_wire_version + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self) -> str: + return f"_AggWritePref(pref={self.pref!r})" + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name: str) -> Any: + return getattr(self.effective_pref, name) + + +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) -def make_read_preference(mode, tag_sets, max_staleness=-1): + +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary " - "cannot be combined with tags") + raise ConfigurationError("Read preference primary cannot be combined with tags") if max_staleness != -1: - raise ConfigurationError("Read preference primary cannot be " - "combined with maxStalenessSeconds") + raise ConfigurationError( + "Read preference primary cannot be combined with maxStalenessSeconds" + ) return Primary() - return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore _MODES = ( - 'PRIMARY', - 'PRIMARY_PREFERRED', - 'SECONDARY', - 'SECONDARY_PREFERRED', - 'NEAREST', + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", ) -class ReadPreference(object): - """An enum that defines the read preference modes supported by PyMongo. +class ReadPreference: + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) See :doc:`/examples/high_availability` for code examples. @@ -432,6 +585,7 @@ class ReadPreference(object): - ``NEAREST``: Read from any shard member. """ + PRIMARY = Primary() PRIMARY_PREFERRED = PrimaryPreferred() SECONDARY = Secondary() @@ -439,20 +593,22 @@ class ReadPreference(object): NEAREST = Nearest() -def read_pref_mode_from_name(name): - """Get the read preference mode from mongos/uri name. - """ +def read_pref_mode_from_name(name: str) -> int: + """Get the read preference mode from mongos/uri name.""" return _MONGOS_MODES.index(name) -class MovingAverage(object): +class MovingAverage: """Tracks an exponentially-weighted moving average.""" - def __init__(self): + + average: Optional[float] + + def __init__(self) -> None: self.average = None - def add_sample(self, sample): + def add_sample(self, sample: float) -> None: if sample < 0: - # Likely system time change while waiting for ismaster response + # Likely system time change while waiting for hello response # and not using time.monotonic. 
Ignore it, the next one will # probably be valid. return @@ -463,9 +619,9 @@ def add_sample(self, sample): # average with alpha = 0.2. self.average = 0.8 * self.average + 0.2 * sample - def get(self): + def get(self) -> Optional[float]: """Get the calculated average, or None if no samples yet.""" return self.average - def reset(self): + def reset(self) -> None: self.average = None diff --git a/pymongo/response.py b/pymongo/response.py index 56cc532f57..5ff6ca707e 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -13,14 +13,30 @@ # limitations under the License. """Represent a response from the server.""" +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union -class Response(object): - __slots__ = ('_data', '_address', '_request_id', '_duration', - '_from_command', '_docs') +if TYPE_CHECKING: + from datetime import timedelta - def __init__(self, data, address, request_id, duration, from_command, - docs): + from pymongo.message import _OpMsg, _OpReply + from pymongo.pool import Connection + from pymongo.typings import _Address, _DocumentOut + + +class Response: + __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: Sequence[Mapping[str, Any]], + ): """Represent a response from the server. :Parameters: @@ -38,70 +54,80 @@ def __init__(self, data, address, request_id, duration, from_command, self._docs = docs @property - def data(self): + def data(self) -> Union[_OpMsg, _OpReply]: """Server response's raw BSON bytes.""" return self._data @property - def address(self): + def address(self) -> _Address: """(host, port) of the source server.""" return self._address @property - def request_id(self): + def request_id(self) -> int: """The request id of this operation.""" return self._request_id @property - def duration(self): + def duration(self) -> Optional[timedelta]: """The duration of the operation.""" return self._duration @property - def from_command(self): + def from_command(self) -> bool: """If the response is a result from a db command.""" return self._from_command @property - def docs(self): + def docs(self) -> Sequence[Mapping[str, Any]]: """The decoded document(s).""" return self._docs -class ExhaustResponse(Response): - __slots__ = ('_socket_info', '_pool') - def __init__(self, data, address, socket_info, pool, request_id, duration, - from_command, docs): +class PinnedResponse(Response): + __slots__ = ("_conn", "_more_to_come") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + conn: Connection, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: list[_DocumentOut], + more_to_come: bool, + ): """Represent a response to an exhaust cursor's initial query. :Parameters: - `data`: A network response message. - `address`: (host, port) of the source server. - - `socket_info`: The SocketInfo used for the initial query. - - `pool`: The Pool from which the SocketInfo came. + - `conn`: The Connection used for the initial query. - `request_id`: The request id of this operation. - `duration`: The duration of the operation. - `from_command`: If the response is the result of a db command. + - `docs`: List of documents. + - `more_to_come`: Bool indicating whether cursor is ready to be + exhausted. 
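Backing up briefly to `MovingAverage` above: with alpha = 0.2 the update is `average = 0.8 * average + 0.2 * sample`, and the first sample seeds the average directly. A worked example:

```python
avg = None
for sample in (10.0, 20.0, 30.0):
    avg = sample if avg is None else 0.8 * avg + 0.2 * sample

# 10.0 -> 0.8*10.0 + 0.2*20.0 = 12.0 -> 0.8*12.0 + 0.2*30.0 = 15.6
assert avg is not None and abs(avg - 15.6) < 1e-9
```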
""" - super(ExhaustResponse, self).__init__(data, - address, - request_id, - duration, - from_command, docs) - self._socket_info = socket_info - self._pool = pool + super().__init__(data, address, request_id, duration, from_command, docs) + self._conn = conn + self._more_to_come = more_to_come @property - def socket_info(self): - """The SocketInfo used for the initial query. + def conn(self) -> Connection: + """The Connection used for the initial query. The server will send batches on this socket, without waiting for getMores from the client, until the result set is exhausted or there is an error. """ - return self._socket_info + return self._conn @property - def pool(self): - """The Pool from which the SocketInfo came.""" - return self._pool + def more_to_come(self) -> bool: + """If true, server is ready to send batches on the socket until the + result set is exhausted or there is an error. + """ + return self._more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index a5025e9f48..20c6023cd2 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -13,28 +13,36 @@ # limitations under the License. """Result class definitions.""" +from __future__ import annotations + +from typing import Any, Mapping, Optional, cast from pymongo.errors import InvalidOperation -class _WriteResult(object): +class _WriteResult: """Base class for write result classes.""" __slots__ = ("__acknowledged",) - def __init__(self, acknowledged): + def __init__(self, acknowledged: bool) -> None: self.__acknowledged = acknowledged - def _raise_if_unacknowledged(self, property_name): + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__acknowledged})" + + def _raise_if_unacknowledged(self, property_name: str) -> None: """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: - raise InvalidOperation("A value for %s is not available when " - "the write is unacknowledged. Check the " - "acknowledged attribute to avoid this " - "error." % (property_name,)) + raise InvalidOperation( + f"A value for {property_name} is not available when " + "the write is unacknowledged. Check the " + "acknowledged attribute to avoid this " + "error." + ) @property - def acknowledged(self): + def acknowledged(self) -> bool: """Is this the result of an acknowledged write operation? The :attr:`acknowledged` attribute will be ``False`` when using @@ -42,7 +50,7 @@ def acknowledged(self): .. note:: If the :attr:`acknowledged` attribute is ``False`` all other - attibutes of this class will raise + attributes of this class will raise :class:`~pymongo.errors.InvalidOperation` when accessed. Values for other attributes cannot be determined if the write operation was unacknowledged. @@ -54,33 +62,41 @@ def acknowledged(self): class InsertOneResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_one`. 
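`_raise_if_unacknowledged()` above is the guard behind every data attribute of the concrete result classes: with `w=0` the server sends no reply, so only `acknowledged` is safe to read. A minimal illustration with a hypothetical subclass:

```python
from pymongo.errors import InvalidOperation
from pymongo.results import _WriteResult  # private; used here only to illustrate


class _DemoResult(_WriteResult):
    __slots__ = ()

    @property
    def n(self):
        self._raise_if_unacknowledged("n")
        return 1


try:
    _DemoResult(acknowledged=False).n
except InvalidOperation as exc:
    print(exc)  # A value for n is not available when the write is unacknowledged...
```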
- """ + """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" - __slots__ = ("__inserted_id", "__acknowledged") + __slots__ = ("__inserted_id",) - def __init__(self, inserted_id, acknowledged): + def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id - super(InsertOneResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_id!r}, acknowledged={self.acknowledged})" + ) @property - def inserted_id(self): + def inserted_id(self) -> Any: """The inserted document's _id.""" return self.__inserted_id class InsertManyResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_many`. - """ + """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" - __slots__ = ("__inserted_ids", "__acknowledged") + __slots__ = ("__inserted_ids",) - def __init__(self, inserted_ids, acknowledged): + def __init__(self, inserted_ids: list[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids - super(InsertManyResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_ids!r}, acknowledged={self.acknowledged})" + ) @property - def inserted_ids(self): + def inserted_ids(self) -> list[Any]: """A list of _ids of the inserted documents, in the order provided. .. note:: If ``False`` is passed for the `ordered` parameter to @@ -97,63 +113,67 @@ class UpdateResult(_WriteResult): :meth:`~pymongo.collection.Collection.replace_one`. """ - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Optional[Mapping[str, Any]], acknowledged: bool): self.__raw_result = raw_result - super(UpdateResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" @property - def raw_result(self): + def raw_result(self) -> Optional[Mapping[str, Any]]: """The raw result document returned by the server.""" return self.__raw_result @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for this update.""" self._raise_if_unacknowledged("matched_count") if self.upserted_id is not None: return 0 + assert self.__raw_result is not None return self.__raw_result.get("n", 0) @property - def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. - """ + def modified_count(self) -> int: + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") - return self.__raw_result.get("nModified") + assert self.__raw_result is not None + return cast(int, self.__raw_result.get("nModified")) @property - def upserted_id(self): + def upserted_id(self) -> Any: """The _id of the inserted document if an upsert took place. Otherwise ``None``. 
""" self._raise_if_unacknowledged("upserted_id") + assert self.__raw_result is not None return self.__raw_result.get("upserted") class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` - and :meth:`~pymongo.collection.Collection.delete_many`""" + and :meth:`~pymongo.collection.Collection.delete_many` + """ - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Mapping[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(DeleteResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" @property - def raw_result(self): + def raw_result(self) -> Mapping[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") return self.__raw_result.get("n", 0) @@ -162,9 +182,9 @@ def deleted_count(self): class BulkWriteResult(_WriteResult): """An object wrapper for bulk API write results.""" - __slots__ = ("__bulk_api_result", "__acknowledged") + __slots__ = ("__bulk_api_result",) - def __init__(self, bulk_api_result, acknowledged): + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. :Parameters: @@ -174,53 +194,50 @@ def __init__(self, bulk_api_result, acknowledged): :exc:`~pymongo.errors.InvalidOperation`. """ self.__bulk_api_result = bulk_api_result - super(BulkWriteResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__bulk_api_result!r}, acknowledged={self.acknowledged})" @property - def bulk_api_result(self): + def bulk_api_result(self) -> dict[str, Any]: """The raw bulk API result.""" return self.__bulk_api_result @property - def inserted_count(self): + def inserted_count(self) -> int: """The number of documents inserted.""" self._raise_if_unacknowledged("inserted_count") - return self.__bulk_api_result.get("nInserted") + return cast(int, self.__bulk_api_result.get("nInserted")) @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for an update.""" self._raise_if_unacknowledged("matched_count") - return self.__bulk_api_result.get("nMatched") + return cast(int, self.__bulk_api_result.get("nMatched")) @property - def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. 
- """ + def modified_count(self) -> int: + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") - return self.__bulk_api_result.get("nModified") + return cast(int, self.__bulk_api_result.get("nModified")) @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") - return self.__bulk_api_result.get("nRemoved") + return cast(int, self.__bulk_api_result.get("nRemoved")) @property - def upserted_count(self): + def upserted_count(self) -> int: """The number of documents upserted.""" self._raise_if_unacknowledged("upserted_count") - return self.__bulk_api_result.get("nUpserted") + return cast(int, self.__bulk_api_result.get("nUpserted")) @property - def upserted_ids(self): + def upserted_ids(self) -> Optional[dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: - return dict((upsert["index"], upsert["_id"]) - for upsert in self.bulk_api_result["upserted"]) + return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} + return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index baa1a40663..02c845079a 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -13,23 +13,30 @@ # limitations under the License. """An implementation of RFC4013 SASLprep.""" +from __future__ import annotations -from bson.py3compat import text_type as _text_type +from typing import Any, Optional try: import stringprep except ImportError: HAVE_STRINGPREP = False - def saslprep(data): + + def saslprep( + data: Any, prohibit_unassigned_code_points: Optional[bool] = True # noqa: ARG001 + ) -> Any: """SASLprep dummy""" - if isinstance(data, _text_type): + if isinstance(data, str): raise TypeError( "The stringprep module is not available. Usernames and " - "passwords must be ASCII strings.") + "passwords must be instances of bytes." + ) return data + else: HAVE_STRINGPREP = True import unicodedata + # RFC4013 section 2.3 prohibited output. _PROHIBITED = ( # A strict reading of RFC 4013 requires table c12 here, but @@ -43,15 +50,16 @@ def saslprep(data): stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, - stringprep.in_table_c9) + stringprep.in_table_c9, + ) - def saslprep(data, prohibit_unassigned_code_points=True): + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> Any: """An implementation of RFC4013 SASLprep. :Parameters: - `data`: The string to SASLprep. Unicode strings - (python 2.x unicode, 3.x str) are supported. Byte strings - (python 2.x str, 3.x bytes) are ignored. + (:class:`str`) are supported. Byte strings + (:class:`bytes`) are ignored. - `prohibit_unassigned_code_points`: True / False. RFC 3454 and RFCs for various SASL mechanisms distinguish between `queries` (unassigned code points allowed) and @@ -61,11 +69,13 @@ def saslprep(data, prohibit_unassigned_code_points=True): :Returns: The SASLprep'ed version of `data`. """ - if not isinstance(data, _text_type): + prohibited: Any + + if not isinstance(data, str): return data if prohibit_unassigned_code_points: - prohibited = _PROHIBITED + (stringprep.in_table_a1,) + prohibited = (*_PROHIBITED, stringprep.in_table_a1) else: prohibited = _PROHIBITED @@ -75,13 +85,13 @@ def saslprep(data, prohibit_unassigned_code_points=True): # commonly mapped to nothing characters to, well, nothing. 
in_table_c12 = stringprep.in_table_c12 in_table_b1 = stringprep.in_table_b1 - data = u"".join( - [u"\u0020" if in_table_c12(elt) else elt - for elt in data if not in_table_b1(elt)]) + data = "".join( + ["\u0020" if in_table_c12(elt) else elt for elt in data if not in_table_b1(elt)] + ) # RFC3454 section 2, step 2 - Normalize # RFC4013 section 2.2 normalization - data = unicodedata.ucd_3_2_0.normalize('NFKC', data) + data = unicodedata.ucd_3_2_0.normalize("NFKC", data) in_table_d1 = stringprep.in_table_d1 if in_table_d1(data[0]): @@ -92,17 +102,16 @@ def saslprep(data, prohibit_unassigned_code_points=True): raise ValueError("SASLprep: failed bidirectional check") # RFC3454, Section 6, #2. If a string contains any RandALCat # character, it MUST NOT contain any LCat character. - prohibited = prohibited + (stringprep.in_table_d2,) + prohibited = (*prohibited, stringprep.in_table_d2) else: # RFC3454, Section 6, #3. Following the logic of #3, if # the first character is not a RandALCat, no other character # can be either. - prohibited = prohibited + (in_table_d1,) + prohibited = (*prohibited, in_table_d1) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: if any(in_table(char) for in_table in prohibited): - raise ValueError( - "SASLprep: failed prohibited character check") + raise ValueError("SASLprep: failed prohibited character check") return data diff --git a/pymongo/server.py b/pymongo/server.py index b1473dfe90..f431fd0140 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -13,23 +13,43 @@ # permissions and limitations under the License. """Communicate with one MongoDB server in a topology.""" +from __future__ import annotations from datetime import datetime +from typing import TYPE_CHECKING, Any, Callable, ContextManager, Optional, Union from bson import _decode_all_selective - -from pymongo.errors import NotMasterError, OperationFailure -from pymongo.helpers import _check_command_response -from pymongo.message import _convert_exception -from pymongo.response import Response, ExhaustResponse -from pymongo.server_type import SERVER_TYPE - -_CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} - - -class Server(object): - def __init__(self, server_description, pool, monitor, topology_id=None, - listeners=None, events=None): +from pymongo.errors import NotPrimaryError, OperationFailure +from pymongo.helpers import _check_command_response, _handle_reauth +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.response import PinnedResponse, Response + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.mongo_client import _MongoClientErrorHandler + from pymongo.monitor import Monitor + from pymongo.monitoring import _EventListeners + from pymongo.pool import Connection, Pool + from pymongo.read_preferences import _ServerMode + from pymongo.server_description import ServerDescription + from pymongo.typings import _DocumentOut + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue]] = None, + ) -> None: """Represent one MongoDB server.""" self._description = server_description self._pool = pool @@ -39,42 +59,50 @@ def __init__(self, server_description, pool, monitor, 
topology_id=None, self._listener = listeners self._events = None if self._publish: - self._events = events() + self._events = events() # type: ignore[misc] - def open(self): + def open(self) -> None: """Start monitoring, or restart after a fork. Multiple calls have no effect. """ - self._monitor.open() + if not self._pool.opts.load_balanced: + self._monitor.open() - def reset(self): + def reset(self, service_id: Optional[ObjectId] = None) -> None: """Clear the connection pool.""" - self.pool.reset() + self.pool.reset(service_id) - def close(self): + def close(self) -> None: """Clear the connection pool and stop the monitor. Reconnect with open(). """ if self._publish: - self._events.put((self._listener.publish_server_closed, - (self._description.address, self._topology_id))) + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) self._monitor.close() - self._pool.reset() + self._pool.reset_without_pause() - def request_check(self): + def request_check(self) -> None: """Check the server's state soon.""" self._monitor.request_check() - def run_operation_with_response( - self, - sock_info, - operation, - set_slave_okay, - listeners, - exhaust, - unpack_res): + @_handle_reauth + def run_operation( + self, + conn: Connection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + ) -> Response: """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -82,41 +110,42 @@ def run_operation_with_response( Can raise ConnectionFailure, OperationFailure, etc. :Parameters: + - `conn`: A Connection instance. - `operation`: A _Query or _GetMore object. - - `set_slave_okay`: Pass to operation.get_message. - - `all_credentials`: dict, maps auth source to MongoCredential. + - `read_preference`: The read preference to use. - `listeners`: Instance of _EventListeners or None. - - `exhaust`: If True, then this is an exhaust cursor operation. - `unpack_res`: A callable that decodes the wire protocol response. 
""" duration = None + assert listeners is not None publish = listeners.enabled_for_commands if publish: start = datetime.now() - send_message = not operation.exhaust_mgr - - if send_message: - use_cmd = operation.use_command(sock_info, exhaust) - message = operation.get_message( - set_slave_okay, sock_info, use_cmd) - request_id, data, max_doc_size = self._split_message(message) - else: - use_cmd = False + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + if more_to_come: request_id = 0 + else: + message = operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) if publish: - cmd, dbn = operation.as_command(sock_info) + cmd, dbn = operation.as_command(conn) + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None listeners.publish_command_start( - cmd, dbn, request_id, sock_info.address) + cmd, dbn, request_id, conn.address, service_id=conn.service_id + ) start = datetime.now() try: - if send_message: - sock_info.send_message(data, max_doc_size) - reply = sock_info.receive_message(request_id) + if more_to_come: + reply = conn.receive_message(None) else: - reply = sock_info.receive_message(None) + conn.send_message(data, max_doc_size) + reply = conn.receive_message(request_id) # Unpack and check for command errors. if use_cmd: @@ -125,25 +154,34 @@ def run_operation_with_response( else: user_fields = None legacy_response = True - docs = unpack_res(reply, operation.cursor_id, - operation.codec_options, - legacy_response=legacy_response, - user_fields=user_fields) + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) if use_cmd: first = docs[0] - operation.client._process_response( - first, operation.session) - _check_command_response(first) + operation.client._process_response(first, operation.session) + _check_command_response(first, conn.max_wire_version) except Exception as exc: if publish: duration = datetime.now() - start - if isinstance(exc, (NotMasterError, OperationFailure)): - failure = exc.details + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] else: failure = _convert_exception(exc) + assert listeners is not None listeners.publish_command_failure( - duration, failure, operation.name, - request_id, sock_info.address) + duration, + failure, + operation.name, + request_id, + conn.address, + service_id=conn.service_id, + database_name=dbn, + ) raise if publish: @@ -151,40 +189,56 @@ def run_operation_with_response( # Must publish in find / getMore / explain command response # format. if use_cmd: - res = docs[0] + res: _DocumentOut = docs[0] elif operation.name == "explain": res = docs[0] if docs else {} else: - res = {"cursor": {"id": reply.cursor_id, - "ns": operation.namespace()}, - "ok": 1} + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] if operation.name == "find": res["cursor"]["firstBatch"] = docs else: res["cursor"]["nextBatch"] = docs + assert listeners is not None listeners.publish_command_success( - duration, res, operation.name, request_id, - sock_info.address) + duration, + res, + operation.name, + request_id, + conn.address, + service_id=conn.service_id, + database_name=dbn, + ) # Decrypt response. 
client = operation.client if client and client._encrypter: if use_cmd: - decrypted = client._encrypter.decrypt( - reply.raw_command_response()) - docs = _decode_all_selective( - decrypted, operation.codec_options, user_fields) + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) - if exhaust: - response = ExhaustResponse( + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. + more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( data=reply, address=self._description.address, - socket_info=sock_info, - pool=self._pool, + conn=conn, duration=duration, request_id=request_id, from_command=use_cmd, - docs=docs) + docs=docs, + more_to_come=more_to_come, + ) else: response = Response( data=reply, @@ -192,41 +246,43 @@ def run_operation_with_response( duration=duration, request_id=request_id, from_command=use_cmd, - docs=docs) + docs=docs, + ) return response - def get_socket(self, all_credentials, checkout=False): - return self.pool.get_socket(all_credentials, checkout) + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> ContextManager[Connection]: + return self.pool.checkout(handler) @property - def description(self): + def description(self) -> ServerDescription: return self._description @description.setter - def description(self, server_description): + def description(self, server_description: ServerDescription) -> None: assert server_description.address == self._description.address self._description = server_description @property - def pool(self): + def pool(self) -> Pool: return self._pool - def _split_message(self, message): + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: """Return request_id, data, max_doc_size. :Parameters: - `message`: (request_id, data, max_doc_size) or (request_id, data) """ if len(message) == 3: - return message + return message # type: ignore[return-value] else: # get_more and kill_cursors messages don't include BSON documents. - request_id, data = message + request_id, data = message # type: ignore[misc] return request_id, data, 0 - def __str__(self): - d = self._description - return '' % ( - d.address[0], d.address[1], - SERVER_TYPE._fields[d.server_type]) + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/server_api.py b/pymongo/server_api.py new file mode 100644 index 0000000000..90505bc5ae --- /dev/null +++ b/pymongo/server_api.py @@ -0,0 +1,175 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for MongoDB Stable API. + +.. _versioned-api-ref: + +MongoDB Stable API +===================== + +Starting in MongoDB 5.0, applications can specify the server API version +to use when creating a :class:`~pymongo.mongo_client.MongoClient`. Doing so +ensures that the driver behaves in a manner compatible with that server API +version, regardless of the server's actual release version. + +Declaring an API Version +```````````````````````` + +.. attention:: Stable API requires MongoDB >=5.0. + +To configure MongoDB Stable API, pass the ``server_api`` keyword option to +:class:`~pymongo.mongo_client.MongoClient`:: + + >>> from pymongo.mongo_client import MongoClient + >>> from pymongo.server_api import ServerApi + >>> + >>> # Declare API version "1" for MongoClient "client" + >>> server_api = ServerApi('1') + >>> client = MongoClient(server_api=server_api) + +The declared API version is applied to all commands run through ``client``, +including those sent through the generic +:meth:`~pymongo.database.Database.command` helper. + +.. note:: Declaring an API version on the + :class:`~pymongo.mongo_client.MongoClient` **and** specifying stable + API options in :meth:`~pymongo.database.Database.command` command document + is not supported and will lead to undefined behaviour. + +To run any command without declaring a server API version or using a different +API version, create a separate :class:`~pymongo.mongo_client.MongoClient` +instance. + +Strict Mode +``````````` + +Configuring ``strict`` mode will cause the MongoDB server to reject all +commands that are not part of the declared :attr:`ServerApi.version`. This +includes command options and aggregation pipeline stages. + +For example:: + + >>> server_api = ServerApi('1', strict=True) + >>> client = MongoClient(server_api=server_api) + >>> client.test.command('count', 'test') + Traceback (most recent call last): + ... + pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError' + +Detecting API Deprecations +`````````````````````````` + +The ``deprecationErrors`` option can be used to enable command failures +when using functionality that is deprecated from the configured +:attr:`ServerApi.version`. For example:: + + >>> server_api = ServerApi('1', deprecation_errors=True) + >>> client = MongoClient(server_api=server_api) + +Note that at the time of this writing, no deprecated APIs exist. + +Classes +======= +""" +from __future__ import annotations + +from typing import Any, MutableMapping, Optional + + +class ServerApiVersion: + """An enum that defines values for :attr:`ServerApi.version`. + + .. versionadded:: 3.12 + """ + + V1 = "1" + """Server API version "1".""" + + +class ServerApi: + """MongoDB Stable API.""" + + def __init__( + self, version: str, strict: Optional[bool] = None, deprecation_errors: Optional[bool] = None + ): + """Options to configure MongoDB Stable API. + + :Parameters: + - `version`: The API version string. Must be one of the values in + :class:`ServerApiVersion`. + - `strict` (optional): Set to ``True`` to enable API strict mode. + Defaults to ``None`` which means "use the server's default". + - `deprecation_errors` (optional): Set to ``True`` to enable + deprecation errors. 
Defaults to ``None`` which means "use the + server's default". + + .. versionadded:: 3.12 + """ + if version != ServerApiVersion.V1: + raise ValueError(f"Unknown ServerApi version: {version}") + if strict is not None and not isinstance(strict, bool): + raise TypeError( + "Wrong type for ServerApi strict, value must be an instance " + f"of bool, not {type(strict)}" + ) + if deprecation_errors is not None and not isinstance(deprecation_errors, bool): + raise TypeError( + "Wrong type for ServerApi deprecation_errors, value must be " + f"an instance of bool, not {type(deprecation_errors)}" + ) + self._version = version + self._strict = strict + self._deprecation_errors = deprecation_errors + + @property + def version(self) -> str: + """The API version setting. + + This value is sent to the server in the "apiVersion" field. + """ + return self._version + + @property + def strict(self) -> Optional[bool]: + """The API strict mode setting. + + When set, this value is sent to the server in the "apiStrict" field. + """ + return self._strict + + @property + def deprecation_errors(self) -> Optional[bool]: + """The API deprecation errors setting. + + When set, this value is sent to the server in the + "apiDeprecationErrors" field. + """ + return self._deprecation_errors + + +def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: + """Internal helper which adds API versioning options to a command. + + :Parameters: + - `cmd`: The command. + - `server_api` (optional): A :class:`ServerApi` or ``None``. + """ + if not server_api: + return + cmd["apiVersion"] = server_api.version + if server_api.strict is not None: + cmd["apiStrict"] = server_api.strict + if server_api.deprecation_errors is not None: + cmd["apiDeprecationErrors"] = server_api.deprecation_errors diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 04e9dbfe77..3b4131f327 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -13,81 +13,116 @@ # limitations under the License. """Represent one server the driver is connected to.""" +from __future__ import annotations + +import time +import warnings +from typing import Any, Mapping, Optional from bson import EPOCH_NAIVE +from bson.objectid import ObjectId +from pymongo.hello import Hello from pymongo.server_type import SERVER_TYPE -from pymongo.ismaster import IsMaster -from pymongo.monotonic import time as _time +from pymongo.typings import ClusterTime, _Address -class ServerDescription(object): +class ServerDescription: """Immutable representation of one server. 
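The ``_add_to_command`` helper above simply merges the declared options into an outgoing command, skipping any that are unset; for example::

    >>> cmd = {"count": "coll"}
    >>> _add_to_command(cmd, ServerApi("1", strict=True))
    >>> cmd
    {'count': 'coll', 'apiVersion': '1', 'apiStrict': True}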
:Parameters: - `address`: A (host, port) pair - - `ismaster`: Optional IsMaster instance + - `hello`: Optional Hello instance - `round_trip_time`: Optional float - `error`: Optional, the last error attempting to connect to the server + - `min_round_trip_time`: Optional float, the min latency from the most recent samples """ __slots__ = ( - '_address', '_server_type', '_all_hosts', '_tags', '_replica_set_name', - '_primary', '_max_bson_size', '_max_message_size', - '_max_write_batch_size', '_min_wire_version', '_max_wire_version', - '_round_trip_time', '_me', '_is_writable', '_is_readable', - '_ls_timeout_minutes', '_error', '_set_version', '_election_id', - '_cluster_time', '_last_write_date', '_last_update_time') + "_address", + "_server_type", + "_all_hosts", + "_tags", + "_replica_set_name", + "_primary", + "_max_bson_size", + "_max_message_size", + "_max_write_batch_size", + "_min_wire_version", + "_max_wire_version", + "_round_trip_time", + "_min_round_trip_time", + "_me", + "_is_writable", + "_is_readable", + "_ls_timeout_minutes", + "_error", + "_set_version", + "_election_id", + "_cluster_time", + "_last_write_date", + "_last_update_time", + "_topology_version", + ) def __init__( - self, - address, - ismaster=None, - round_trip_time=None, - error=None): + self, + address: _Address, + hello: Optional[Hello] = None, + round_trip_time: Optional[float] = None, + error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, + ) -> None: self._address = address - if not ismaster: - ismaster = IsMaster({}) - - self._server_type = ismaster.server_type - self._all_hosts = ismaster.all_hosts - self._tags = ismaster.tags - self._replica_set_name = ismaster.replica_set_name - self._primary = ismaster.primary - self._max_bson_size = ismaster.max_bson_size - self._max_message_size = ismaster.max_message_size - self._max_write_batch_size = ismaster.max_write_batch_size - self._min_wire_version = ismaster.min_wire_version - self._max_wire_version = ismaster.max_wire_version - self._set_version = ismaster.set_version - self._election_id = ismaster.election_id - self._cluster_time = ismaster.cluster_time - self._is_writable = ismaster.is_writable - self._is_readable = ismaster.is_readable - self._ls_timeout_minutes = ismaster.logical_session_timeout_minutes + if not hello: + hello = Hello({}) + + self._server_type = hello.server_type + self._all_hosts = hello.all_hosts + self._tags = hello.tags + self._replica_set_name = hello.replica_set_name + self._primary = hello.primary + self._max_bson_size = hello.max_bson_size + self._max_message_size = hello.max_message_size + self._max_write_batch_size = hello.max_write_batch_size + self._min_wire_version = hello.min_wire_version + self._max_wire_version = hello.max_wire_version + self._set_version = hello.set_version + self._election_id = hello.election_id + self._cluster_time = hello.cluster_time + self._is_writable = hello.is_writable + self._is_readable = hello.is_readable + self._ls_timeout_minutes = hello.logical_session_timeout_minutes self._round_trip_time = round_trip_time - self._me = ismaster.me - self._last_update_time = _time() + self._min_round_trip_time = min_round_trip_time + self._me = hello.me + self._last_update_time = time.monotonic() self._error = error - - if ismaster.last_write_date: + self._topology_version = hello.topology_version + if error: + details = getattr(error, "details", None) + if isinstance(details, dict): + self._topology_version = details.get("topologyVersion") + + self._last_write_date: Optional[float] + if
hello.last_write_date: # Convert from datetime to seconds. - delta = ismaster.last_write_date - EPOCH_NAIVE + delta = hello.last_write_date - EPOCH_NAIVE self._last_write_date = delta.total_seconds() else: self._last_write_date = None @property - def address(self): + def address(self) -> _Address: """The address (host, port) of this server.""" return self._address @property - def server_type(self): + def server_type(self) -> int: """The type of this server.""" return self._server_type @property - def server_type_name(self): + def server_type_name(self) -> str: """The server type as a human readable string. .. versionadded:: 3.4 @@ -95,78 +130,83 @@ def server_type_name(self): return SERVER_TYPE._fields[self._server_type] @property - def all_hosts(self): + def all_hosts(self) -> set[tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return self._all_hosts @property - def tags(self): + def tags(self) -> Mapping[str, Any]: return self._tags @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self._replica_set_name @property - def primary(self): + def primary(self) -> Optional[tuple[str, int]]: """This server's opinion about who the primary is, or None.""" return self._primary @property - def max_bson_size(self): + def max_bson_size(self) -> int: return self._max_bson_size @property - def max_message_size(self): + def max_message_size(self) -> int: return self._max_message_size @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: return self._max_write_batch_size @property - def min_wire_version(self): + def min_wire_version(self) -> int: return self._min_wire_version @property - def max_wire_version(self): + def max_wire_version(self) -> int: return self._max_wire_version @property - def set_version(self): + def set_version(self) -> Optional[int]: return self._set_version @property - def election_id(self): + def election_id(self) -> Optional[ObjectId]: return self._election_id @property - def cluster_time(self): + def cluster_time(self) -> Optional[ClusterTime]: return self._cluster_time @property - def election_tuple(self): + def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) return self._set_version, self._election_id @property - def me(self): + def me(self) -> Optional[tuple[str, int]]: return self._me @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: return self._ls_timeout_minutes @property - def last_write_date(self): + def last_write_date(self) -> Optional[float]: return self._last_write_date @property - def last_update_time(self): + def last_update_time(self) -> float: return self._last_update_time @property - def round_trip_time(self): + def round_trip_time(self) -> Optional[float]: """The current average latency or None.""" # This override is for unittesting only! 
if self._address in self._host_to_round_trip_time: @@ -175,37 +215,87 @@ def round_trip_time(self): return self._round_trip_time @property - def error(self): + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + + @property + def error(self) -> Optional[Exception]: """The last error attempting to connect to the server, or None.""" return self._error @property - def is_writable(self): + def is_writable(self) -> bool: return self._is_writable @property - def is_readable(self): + def is_readable(self) -> bool: return self._is_readable @property - def mongos(self): + def mongos(self) -> bool: return self._server_type == SERVER_TYPE.Mongos @property - def is_server_type_known(self): + def is_server_type_known(self) -> bool: return self.server_type != SERVER_TYPE.Unknown @property - def retryable_writes_supported(self): + def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" return ( - self._ls_timeout_minutes is not None and - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)) + self._ls_timeout_minutes is not None + and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + ) or self._server_type == SERVER_TYPE.LoadBalancer @property - def retryable_reads_supported(self): + def retryable_reads_supported(self) -> bool: """Checks if this server supports retryable reads.""" return self._max_wire_version >= 6 + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._topology_version + + def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription: + unknown = ServerDescription(self.address, error=error) + unknown._topology_version = self.topology_version + return unknown + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ServerDescription): + return ( + (self._address == other.address) + and (self._server_type == other.server_type) + and (self._min_wire_version == other.min_wire_version) + and (self._max_wire_version == other.max_wire_version) + and (self._me == other.me) + and (self._all_hosts == other.all_hosts) + and (self._tags == other.tags) + and (self._replica_set_name == other.replica_set_name) + and (self._set_version == other.set_version) + and (self._election_id == other.election_id) + and (self._primary == other.primary) + and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) + and (self._error == other.error) + ) + + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + errmsg = "" + if self.error: + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( + self.__class__.__name__, + self.address, + self.server_type_name, + self.round_trip_time, + errmsg, + ) + # For unittesting only. Use under no circumstances! - _host_to_round_trip_time = {} + _host_to_round_trip_time: dict = {} diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index 01f13065c2..c22ad599ee 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -13,15 +13,27 @@ # permissions and limitations under the License.
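The selector functions that follow filter ``Selection`` objects inside the driver; the user-facing relative is the ``server_selector`` client option, which receives a list of ``ServerDescription`` instances and returns a subset. A sketch of a hypothetical latency-based policy (the function name and 10ms cutoff are illustrative)::

    from pymongo import MongoClient

    def within_10ms_of_fastest(server_descriptions):
        # Hypothetical policy: keep servers within 10ms of the best RTT.
        rtts = [sd.round_trip_time for sd in server_descriptions
                if sd.round_trip_time is not None]
        if not rtts:
            return server_descriptions
        cutoff = min(rtts) + 0.010
        return [sd for sd in server_descriptions
                if sd.round_trip_time is not None
                and sd.round_trip_time <= cutoff]

    client = MongoClient(server_selector=within_10ms_of_fastest)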
"""Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, TypeVar, cast from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +T = TypeVar("T") +TagSet = Mapping[str, Any] +TagSets = Sequence[TagSet] + -class Selection(object): +class Selection: """Input or output of a server selector function.""" @classmethod - def from_topology_description(cls, topology_description): + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: known_servers = topology_description.known_servers primary = None for sd in known_servers: @@ -29,88 +41,92 @@ def from_topology_description(cls, topology_description): primary = sd break - return Selection(topology_description, - topology_description.known_servers, - topology_description.common_wire_version, - primary) - - def __init__(self, - topology_description, - server_descriptions, - common_wire_version, - primary): + return Selection( + topology_description, + topology_description.known_servers, + topology_description.common_wire_version, + primary, + ) + + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: list[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version - def with_server_descriptions(self, server_descriptions): - return Selection(self.topology_description, - server_descriptions, - self.common_wire_version, - self.primary) + def with_server_descriptions(self, server_descriptions: list[ServerDescription]) -> Selection: + return Selection( + self.topology_description, server_descriptions, self.common_wire_version, self.primary + ) - def secondary_with_max_last_write_date(self): + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: secondaries = secondary_server_selector(self) if secondaries.server_descriptions: - return max(secondaries.server_descriptions, - key=lambda sd: sd.last_write_date) + return max( + secondaries.server_descriptions, key=lambda sd: cast(float, sd.last_write_date) + ) + return None @property - def primary_selection(self): + def primary_selection(self) -> Selection: primaries = [self.primary] if self.primary else [] return self.with_server_descriptions(primaries) @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self.topology_description.heartbeat_frequency @property - def topology_type(self): + def topology_type(self) -> int: return self.topology_description.topology_type - def __bool__(self): + def __bool__(self) -> bool: return bool(self.server_descriptions) - __nonzero__ = __bool__ # Python 2. 
- - def __getitem__(self, item): + def __getitem__(self, item: int) -> ServerDescription: return self.server_descriptions[item] -def any_server_selector(selection): +def any_server_selector(selection: T) -> T: return selection -def readable_server_selector(selection): +def readable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_readable]) + [s for s in selection.server_descriptions if s.is_readable] + ) -def writable_server_selector(selection): +def writable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_writable]) + [s for s in selection.server_descriptions if s.is_writable] + ) -def secondary_server_selector(selection): +def secondary_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSSecondary]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] + ) -def arbiter_server_selector(selection): +def arbiter_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSArbiter]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] + ) -def writable_preferred_server_selector(selection): +def writable_preferred_server_selector(selection: Selection) -> Selection: """Like PrimaryPreferred but doesn't use tags or latency.""" - return (writable_server_selector(selection) or - secondary_server_selector(selection)) + return writable_server_selector(selection) or secondary_server_selector(selection) -def apply_single_tag_set(tag_set, selection): +def apply_single_tag_set(tag_set: TagSet, selection: Selection) -> Selection: """All servers matching one tag set. A tag set is a dict. A server matches if its tags are a superset: @@ -118,7 +134,8 @@ def apply_single_tag_set(tag_set, selection): The empty tag set {} matches any server. """ - def tags_match(server_tags): + + def tags_match(server_tags: Mapping[str, Any]) -> bool: for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: return False @@ -126,10 +143,11 @@ def tags_match(server_tags): return True return selection.with_server_descriptions( - [s for s in selection.server_descriptions if tags_match(s.tags)]) + [s for s in selection.server_descriptions if tags_match(s.tags)] + ) -def apply_tag_sets(tag_sets, selection): +def apply_tag_sets(tag_sets: TagSets, selection: Selection) -> Selection: """All servers match a list of tag sets. tag_sets is a list of dicts. 
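In application code these tag-set rules surface through read preferences; for example, assuming replica set members carry a ``dc`` tag in their configuration::

    from pymongo.read_preferences import Secondary

    # Prefer secondaries tagged dc=ny; the trailing {} falls back to
    # any secondary if none match.
    pref = Secondary(tag_sets=[{"dc": "ny"}, {}])
    coll = client.test.get_collection("coll", read_preference=pref)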
The empty tag set {} matches any server, @@ -146,11 +164,11 @@ def apply_tag_sets(tag_sets, selection): return selection.with_server_descriptions([]) -def secondary_with_tags_server_selector(tag_sets, selection): +def secondary_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: """All near-enough secondaries matching the tag sets.""" return apply_tag_sets(tag_sets, secondary_server_selector(selection)) -def member_with_tags_server_selector(tag_sets, selection): +def member_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: """All near-enough members matching the tag sets.""" return apply_tag_sets(tag_sets, readable_server_selector(selection)) diff --git a/pymongo/server_type.py b/pymongo/server_type.py index c231aa04c2..937855cc7a 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -13,11 +13,21 @@ # limitations under the License. """Type codes for MongoDB servers.""" +from __future__ import annotations -from collections import namedtuple +from typing import NamedTuple -SERVER_TYPE = namedtuple('ServerType', - ['Unknown', 'Mongos', 'RSPrimary', 'RSSecondary', - 'RSArbiter', 'RSOther', 'RSGhost', - 'Standalone'])(*range(8)) +class _ServerType(NamedTuple): + Unknown: int + Mongos: int + RSPrimary: int + RSSecondary: int + RSArbiter: int + RSOther: int + RSGhost: int + Standalone: int + LoadBalancer: int + + +SERVER_TYPE = _ServerType(*range(9)) diff --git a/pymongo/settings.py b/pymongo/settings.py index 2a02f05d5d..4a3e7be4cd 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -13,117 +13,156 @@ # permissions and limitations under the License. """Represent MongoClient's configuration.""" +from __future__ import annotations import threading +import traceback +from typing import Any, Collection, Optional, Type, Union from bson.objectid import ObjectId from pymongo import common, monitor, pool from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT from pymongo.errors import ConfigurationError -from pymongo.pool import PoolOptions +from pymongo.pool import Pool, PoolOptions from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE - - -class TopologySettings(object): - def __init__(self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None): +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + ): """Represent MongoClient's configuration. 
Take a list of (host, port) pairs and optional replica set name. """ if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: raise ConfigurationError( - "heartbeatFrequencyMS cannot be less than %d" % ( - common.MIN_HEARTBEAT_INTERVAL * 1000,)) + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) - self._seeds = seeds or [('localhost', 27017)] + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] self._replica_set_name = replica_set_name - self._pool_class = pool_class or pool.Pool - self._pool_options = pool_options or PoolOptions() - self._monitor_class = monitor_class or monitor.Monitor - self._condition_class = condition_class or threading.Condition + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition self._local_threshold_ms = local_threshold_ms self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector self._fqdn = fqdn self._heartbeat_frequency = heartbeat_frequency - self._direct = (len(self._seeds) == 1 and not replica_set_name) + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. + self._stack = "".join(traceback.format_stack()) @property - def seeds(self): + def seeds(self) -> Collection[tuple[str, int]]: """List of server addresses.""" return self._seeds @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: return self._replica_set_name @property - def pool_class(self): + def pool_class(self) -> Type[Pool]: return self._pool_class @property - def pool_options(self): + def pool_options(self) -> PoolOptions: return self._pool_options @property - def monitor_class(self): + def monitor_class(self) -> Type[monitor.Monitor]: return self._monitor_class @property - def condition_class(self): + def condition_class(self) -> Type[threading.Condition]: return self._condition_class @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: return self._local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: return self._server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> Optional[_ServerSelector]: return self._server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._heartbeat_frequency @property - def fqdn(self): + def fqdn(self) -> Optional[str]: return self._fqdn @property - def direct(self): + def direct(self) -> Optional[bool]: """Connect directly to a single server, or use a set of servers? True if there is one seed and no replica_set_name. 
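Note that ``direct`` is now driven by the explicit ``directConnection`` option rather than inferred from having a single seed; from the URI side this looks like (host is a placeholder)::

    from pymongo import MongoClient

    # Connect to exactly one member and skip topology discovery.
    client = MongoClient("mongodb://localhost:27017/?directConnection=true")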
""" return self._direct - def get_topology_type(self): - if self.direct: + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: return TOPOLOGY_TYPE.Single elif self.replica_set_name is not None: return TOPOLOGY_TYPE.ReplicaSetNoPrimary else: return TOPOLOGY_TYPE.Unknown - def get_server_descriptions(self): + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([ - (address, ServerDescription(address)) - for address in self.seeds]) + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py new file mode 100644 index 0000000000..78861854ab --- /dev/null +++ b/pymongo/socket_checker.py @@ -0,0 +1,105 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Select / poll helper""" +from __future__ import annotations + +import errno +import select +import sys +from typing import Any, Optional, cast + +# PYTHON-2320: Jython does not fully support poll on SSL sockets, +# https://bugs.jython.org/issue2900 +_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith("java") +_SelectError = getattr(select, "error", OSError) + + +def _errno_from_exception(exc: BaseException) -> Optional[int]: + if hasattr(exc, "errno"): + return cast(int, exc.errno) + if exc.args: + return cast(int, exc.args[0]) + return None + + +class SocketChecker: + def __init__(self) -> None: + self._poller: Optional[select.poll] + if _HAVE_POLL: + self._poller = select.poll() + else: + self._poller = None + + def select( + self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0 + ) -> bool: + """Select for reads or writes with a timeout in seconds (or None). + + Returns True if the socket is readable/writable, False on timeout. + """ + res: Any + while True: + try: + if self._poller: + mask = select.POLLERR | select.POLLHUP + if read: + mask = mask | select.POLLIN | select.POLLPRI + if write: + mask = mask | select.POLLOUT + self._poller.register(sock, mask) + try: + # poll() timeout is in milliseconds. select() + # timeout is in seconds. + timeout_ = None if timeout is None else timeout * 1000 + res = self._poller.poll(timeout_) + # poll returns a possibly-empty list containing + # (fd, event) 2-tuples for the descriptors that have + # events or errors to report. 
Return True if the list + # is not empty. + return bool(res) + finally: + self._poller.unregister(sock) + else: + rlist = [sock] if read else [] + wlist = [sock] if write else [] + res = select.select(rlist, wlist, [sock], timeout) + # select returns a 3-tuple of lists of objects that are + # ready: subsets of the first three arguments. Return + # True if any of the lists are not empty. + return any(res) + except (_SelectError, OSError) as exc: # type: ignore + if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): + continue + raise + + def socket_closed(self, sock: Any) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + try: + return self.select(sock, read=True) + except (RuntimeError, KeyError): + # RuntimeError is raised during a concurrent poll. KeyError + # is raised by unregister if the socket is not in the poller. + # These errors should not be possible since we protect the + # poller with a mutex. + raise + except ValueError: + # ValueError is raised by register/unregister/select if the + # socket file descriptor is negative or outside the range for + # select (> 1023). + return True + except Exception: + # Any other exceptions should be attributed to a closed + # or invalid socket. + return True diff --git a/pymongo/son_manipulator.py b/pymongo/son_manipulator.py deleted file mode 100644 index f470d6f338..0000000000 --- a/pymongo/son_manipulator.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""**DEPRECATED**: Manipulators that can edit SON objects as they enter and exit -a database. - -The :class:`~pymongo.son_manipulator.SONManipulator` API has limitations as a -technique for transforming your data. Instead, it is more flexible and -straightforward to transform outgoing documents in your own code before passing -them to PyMongo, and transform incoming documents after receiving them from -PyMongo. SON Manipulators will be removed from PyMongo in 4.0. - -PyMongo does **not** apply SON manipulators to documents passed to -the modern methods :meth:`~pymongo.collection.Collection.bulk_write`, -:meth:`~pymongo.collection.Collection.insert_one`, -:meth:`~pymongo.collection.Collection.insert_many`, -:meth:`~pymongo.collection.Collection.update_one`, or -:meth:`~pymongo.collection.Collection.update_many`. SON manipulators are -**not** applied to documents returned by the modern methods -:meth:`~pymongo.collection.Collection.find_one_and_delete`, -:meth:`~pymongo.collection.Collection.find_one_and_replace`, and -:meth:`~pymongo.collection.Collection.find_one_and_update`. -""" - -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.py3compat import abc -from bson.son import SON - - -class SONManipulator(object): - """A base son manipulator. - - This manipulator just saves and restores objects without changing them. - """ - - def will_copy(self): - """Will this SON manipulator make a copy of the incoming document? 
- - Derived classes that do need to make a copy should override this - method, returning True instead of False. All non-copying manipulators - will be applied first (so that the user's document will be updated - appropriately), followed by copying manipulators. - """ - return False - - def transform_incoming(self, son, collection): - """Manipulate an incoming SON object. - - :Parameters: - - `son`: the SON object to be inserted into the database - - `collection`: the collection the object is being inserted into - """ - if self.will_copy(): - return SON(son) - return son - - def transform_outgoing(self, son, collection): - """Manipulate an outgoing SON object. - - :Parameters: - - `son`: the SON object being retrieved from the database - - `collection`: the collection this object was stored in - """ - if self.will_copy(): - return SON(son) - return son - - -class ObjectIdInjector(SONManipulator): - """A son manipulator that adds the _id field if it is missing. - - .. versionchanged:: 2.7 - ObjectIdInjector is no longer used by PyMongo, but remains in this - module for backwards compatibility. - """ - - def transform_incoming(self, son, collection): - """Add an _id field if it is missing. - """ - if not "_id" in son: - son["_id"] = ObjectId() - return son - - -# This is now handled during BSON encoding (for performance reasons), -# but I'm keeping this here as a reference for those implementing new -# SONManipulators. -class ObjectIdShuffler(SONManipulator): - """A son manipulator that moves _id to the first position. - """ - - def will_copy(self): - """We need to copy to be sure that we are dealing with SON, not a dict. - """ - return True - - def transform_incoming(self, son, collection): - """Move _id to the front if it's there. - """ - if not "_id" in son: - return son - transformed = SON({"_id": son["_id"]}) - transformed.update(son) - return transformed - - -class NamespaceInjector(SONManipulator): - """A son manipulator that adds the _ns field. - """ - - def transform_incoming(self, son, collection): - """Add the _ns field to the incoming object - """ - son["_ns"] = collection.name - return son - - -class AutoReference(SONManipulator): - """Transparently reference and de-reference already saved embedded objects. - - This manipulator should probably only be used when the NamespaceInjector is - also being used, otherwise it doesn't make too much sense - documents can - only be auto-referenced if they have an *_ns* field. - - NOTE: this will behave poorly if you have a circular reference. - - TODO: this only works for documents that are in the same database. To fix - this we'll need to add a DatabaseInjector that adds *_db* and then make - use of the optional *database* support for DBRefs. - """ - - def __init__(self, db): - self.database = db - - def will_copy(self): - """We need to copy so the user's document doesn't get transformed refs. - """ - return True - - def transform_incoming(self, son, collection): - """Replace embedded documents with DBRefs. 
- """ - - def transform_value(value): - if isinstance(value, abc.MutableMapping): - if "_id" in value and "_ns" in value: - return DBRef(value["_ns"], transform_value(value["_id"])) - else: - return transform_dict(SON(value)) - elif isinstance(value, list): - return [transform_value(v) for v in value] - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) - - def transform_outgoing(self, son, collection): - """Replace DBRefs with embedded documents. - """ - - def transform_value(value): - if isinstance(value, DBRef): - return self.database.dereference(value) - elif isinstance(value, list): - return [transform_value(v) for v in value] - elif isinstance(value, abc.MutableMapping): - return transform_dict(SON(value)) - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 3ccd529372..76c8b5161c 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -13,95 +13,126 @@ # permissions and limitations under the License. """Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import Any, Optional, Union try: from dns import resolver + _HAVE_DNSPYTHON = True except ImportError: _HAVE_DNSPYTHON = False -from bson.py3compat import PY3 - from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError -if PY3: - # dnspython can return bytes or str from various parts - # of its API depending on version. We always want str. - def maybe_decode(text): - if isinstance(text, bytes): - return text.decode() - return text -else: - def maybe_decode(text): - return text +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if hasattr(resolver, "resolve"): + # dnspython >= 2 + return resolver.resolve(*args, **kwargs) + # dnspython 1.X + return resolver.query(*args, **kwargs) + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) -class _SrvResolver(object): - def __init__(self, fqdn, connect_timeout=None): + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): self.__fqdn = fqdn + self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT - + self.__srv_max_hosts = srv_max_hosts or 0 # Validate the fully qualified domain name. 
+ try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: self.__plist = self.__fqdn.split(".")[1:] except Exception: - raise ConfigurationError("Invalid URI host: %s" % (fqdn,)) + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None self.__slen = len(self.__plist) if self.__slen < 2: - raise ConfigurationError("Invalid URI host: %s" % (fqdn,)) + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) - def get_options(self): + def get_options(self) -> Optional[str]: try: - results = resolver.query(self.__fqdn, 'TXT', - lifetime=self.__connect_timeout) + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): # No TXT records return None except Exception as exc: - raise ConfigurationError(str(exc)) + raise ConfigurationError(str(exc)) from None if len(results) > 1: - raise ConfigurationError('Only one TXT record is supported') - return ( - b'&'.join([b''.join(res.strings) for res in results])).decode( - 'utf-8') + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") - def _resolve_uri(self, encapsulate_errors): + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: try: - results = resolver.query('_mongodb._tcp.' + self.__fqdn, 'SRV', - lifetime=self.__connect_timeout) + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) except Exception as exc: if not encapsulate_errors: # Raise the original error. raise # Else, raise all errors as ConfigurationError. - raise ConfigurationError(str(exc)) + raise ConfigurationError(str(exc)) from None return results - def _get_srv_response_and_hosts(self, encapsulate_errors): + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: results = self._resolve_uri(encapsulate_errors) # Construct address tuples nodes = [ - (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) - for res in results] + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results + ] # Validate hosts for node in nodes: try: - nlist = node[0].split(".")[1:][-self.__slen:] + nlist = node[0].lower().split(".")[1:][-self.__slen :] except Exception: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None if self.__plist != nlist: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) - + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes - def get_hosts(self): + def get_hosts(self) -> list[tuple[str, Any]]: _, nodes = self._get_srv_response_and_hosts(True) return nodes - def get_hosts_and_min_ttl(self): + def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: results, nodes = self._get_srv_response_and_hosts(False) - return nodes, results.rrset.ttl + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 6afb2d2d56..1a0424208f 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -13,84 +13,28 @@ # permissions and limitations under the License. 
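
One behavioral detail from the resolver hunk above: when srvMaxHosts is set, the node list is randomly sampled rather than truncated, so no SRV record is systematically favored. A toy illustration (hostnames are made up):

import random

nodes = [("a.example.com", 27017), ("b.example.com", 27017), ("c.example.com", 27017)]
srv_max_hosts = 2
if srv_max_hosts:
    nodes = random.sample(nodes, min(srv_max_hosts, len(nodes)))
print(nodes)  # two distinct hosts, in random order
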
"""A fake SSLContext implementation.""" +from __future__ import annotations -try: - import ssl -except ImportError: - pass +import ssl as _ssl +# PROTOCOL_TLS_CLIENT is Python 3.6+ +PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) +OP_NO_SSLv2 = getattr(_ssl, "OP_NO_SSLv2", 0) +OP_NO_SSLv3 = getattr(_ssl, "OP_NO_SSLv3", 0) +OP_NO_COMPRESSION = getattr(_ssl, "OP_NO_COMPRESSION", 0) +# Python 3.7+, OpenSSL 1.1.0h+ +OP_NO_RENEGOTIATION = getattr(_ssl, "OP_NO_RENEGOTIATION", 0) -class SSLContext(object): - """A fake SSLContext. +HAS_SNI = getattr(_ssl, "HAS_SNI", False) +IS_PYOPENSSL = False - This implements an API similar to ssl.SSLContext from python 3.2 - but does not implement methods or properties that would be - incompatible with ssl.wrap_socket from python 2.7 < 2.7.9. +# Errors raised by SSL sockets when in non-blocking mode. +BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) - You must pass protocol which must be one of the PROTOCOL_* constants - defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum - interoperability. - """ +# Base Exception class +SSLError = _ssl.SSLError - __slots__ = ('_cafile', '_certfile', - '_keyfile', '_protocol', '_verify_mode') +from ssl import SSLContext # noqa: F401,E402 - def __init__(self, protocol): - self._cafile = None - self._certfile = None - self._keyfile = None - self._protocol = protocol - self._verify_mode = ssl.CERT_NONE - - @property - def protocol(self): - """The protocol version chosen when constructing the context. - This attribute is read-only. - """ - return self._protocol - - def __get_verify_mode(self): - """Whether to try to verify other peers' certificates and how to - behave if verification fails. This attribute must be one of - ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. - """ - return self._verify_mode - - def __set_verify_mode(self, value): - """Setter for verify_mode.""" - self._verify_mode = value - - verify_mode = property(__get_verify_mode, __set_verify_mode) - - def load_cert_chain(self, certfile, keyfile=None): - """Load a private key and the corresponding certificate. The certfile - string must be the path to a single file in PEM format containing the - certificate as well as any number of CA certificates needed to - establish the certificate's authenticity. The keyfile string, if - present, must point to a file containing the private key. Otherwise - the private key will be taken from certfile as well. - """ - self._certfile = certfile - self._keyfile = keyfile - - def load_verify_locations(self, cafile=None, dummy=None): - """Load a set of "certification authority"(CA) certificates used to - validate other peers' certificates when `~verify_mode` is other than - ssl.CERT_NONE. - """ - self._cafile = cafile - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, dummy=None): - """Wrap an existing Python socket sock and return an ssl.SSLSocket - object. 
- """ - return ssl.wrap_socket(sock, keyfile=self._keyfile, - certfile=self._certfile, - server_side=server_side, - cert_reqs=self._verify_mode, - ssl_version=self._protocol, - ca_certs=self._cafile, - do_handshake_on_connect=do_handshake_on_connect, - suppress_ragged_eofs=suppress_ragged_eofs) +if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): + from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 diff --git a/pymongo/ssl_match_hostname.py b/pymongo/ssl_match_hostname.py deleted file mode 100644 index 49e3dd6576..0000000000 --- a/pymongo/ssl_match_hostname.py +++ /dev/null @@ -1,135 +0,0 @@ -# Backport of the match_hostname logic from python 3.5, with small -# changes to support IP address matching on python 2.7 and 3.4. - -import re -import sys - -try: - # Python 3.4+, or the ipaddress module from pypi. - from ipaddress import ip_address -except ImportError: - ip_address = lambda address: None - -# ipaddress.ip_address requires unicode -if sys.version_info[0] < 3: - _unicode = unicode -else: - _unicode = lambda value: value - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - ip = ip_address(_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed. - - CertificateError is raised on failure. On success, the function - returns nothing. 
- """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - host_ip = ip_address(_unicode(hostname)) - except (ValueError, UnicodeError): - # Not an IP address (common case) - host_ip = None - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 4976017daa..3c9ee01ef1 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -13,192 +13,92 @@ # permissions and limitations under the License. """Support for SSL in PyMongo.""" +from __future__ import annotations -import atexit -import sys -import threading +from typing import Optional -HAVE_SSL = True -try: - import ssl -except ImportError: - HAVE_SSL = False +from pymongo.errors import ConfigurationError -HAVE_CERTIFI = False -try: - import certifi - HAVE_CERTIFI = True -except ImportError: - pass +HAVE_SSL = True -HAVE_WINCERTSTORE = False try: - from wincertstore import CertFile - HAVE_WINCERTSTORE = True + import pymongo.pyopenssl_context as _ssl except ImportError: - pass - -from bson.py3compat import string_type -from pymongo.errors import ConfigurationError - -_WINCERTSLOCK = threading.Lock() -_WINCERTS = None - -_PY37PLUS = sys.version_info[:2] >= (3, 7) - -if HAVE_SSL: try: - # Python 2.7.9+, PyPy 2.5.1+, etc. - from ssl import SSLContext + import pymongo.ssl_context as _ssl # type: ignore[no-redef] except ImportError: - from pymongo.ssl_context import SSLContext - - def validate_cert_reqs(option, value): - """Validate the cert reqs are valid. It must be None or one of the - three values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or - ``ssl.CERT_REQUIRED``. - """ - if value is None: - return value - elif isinstance(value, string_type) and hasattr(ssl, value): - value = getattr(ssl, value) - - if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED): - return value - raise ValueError("The value of %s must be one of: " - "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or " - "`ssl.CERT_REQUIRED`" % (option,)) - - def validate_allow_invalid_certs(option, value): - """Validate the option to allow invalid certificates is valid.""" - # Avoid circular import. 
- from pymongo.common import validate_boolean_or_string - boolean_cert_reqs = validate_boolean_or_string(option, value) - if boolean_cert_reqs: - return ssl.CERT_NONE - return ssl.CERT_REQUIRED - - def _load_wincerts(): - """Set _WINCERTS to an instance of wincertstore.Certfile.""" - global _WINCERTS - - certfile = CertFile() - certfile.addstore("CA") - certfile.addstore("ROOT") - atexit.register(certfile.close) + HAVE_SSL = False - _WINCERTS = certfile - # XXX: Possible future work. - # - OCSP? Not supported by python at all. - # http://bugs.python.org/issue17123 - # - Adding an ssl_context keyword argument to MongoClient? This might - # be useful for sites that have unusual requirements rather than - # trying to expose every SSLContext option through a keyword/uri - # parameter. - def get_ssl_context(*args): +if HAVE_SSL: + # Note: The validate* functions below deal with users passing + # CPython ssl module constants to configure certificate verification + # at a high level. This is legacy behavior, but requires us to + # import the ssl module even if we're only using it for this purpose. + import ssl as _stdlibssl # noqa: F401 + from ssl import CERT_NONE, CERT_REQUIRED + + HAS_SNI = _ssl.HAS_SNI + IPADDR_SAFE = True + SSLError = _ssl.SSLError + BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS + + def get_ssl_context( + certfile: Optional[str], + passphrase: Optional[str], + ca_certs: Optional[str], + crlfile: Optional[str], + allow_invalid_certificates: bool, + allow_invalid_hostnames: bool, + disable_ocsp_endpoint_check: bool, + ) -> _ssl.SSLContext: """Create and return an SSLContext object.""" - (certfile, - keyfile, - passphrase, - ca_certs, - cert_reqs, - crlfile, - match_hostname) = args - verify_mode = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs - # Note PROTOCOL_SSLv23 is about the most misleading name imaginable. - # This configures the server and client to negotiate the - # highest protocol version they both support. A very good thing. - # PROTOCOL_TLS_CLIENT was added in CPython 3.6, deprecating - # PROTOCOL_SSLv23. - ctx = SSLContext( - getattr(ssl, "PROTOCOL_TLS_CLIENT", ssl.PROTOCOL_SSLv23)) - # SSLContext.check_hostname was added in CPython 2.7.9 and 3.4. - # PROTOCOL_TLS_CLIENT (added in Python 3.6) enables it by default. - if hasattr(ctx, "check_hostname"): - if _PY37PLUS and verify_mode != ssl.CERT_NONE: - # Python 3.7 uses OpenSSL's hostname matching implementation - # making it the obvious version to start using this with. - # Python 3.6 might have been a good version, but it suffers - # from https://bugs.python.org/issue32185. - # We'll use our bundled match_hostname for older Python - # versions, which also supports IP address matching - # with Python < 3.5. - ctx.check_hostname = match_hostname - else: - ctx.check_hostname = False + verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED + ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) + if verify_mode != CERT_NONE: + ctx.check_hostname = not allow_invalid_hostnames + else: + ctx.check_hostname = False + if hasattr(ctx, "check_ocsp_endpoint"): + ctx.check_ocsp_endpoint = not disable_ocsp_endpoint_check if hasattr(ctx, "options"): # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that # up to date versions of MongoDB 2.4 and above already disable # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7 - # and >= 3.3.4 and SSLv3 in >= 3.4.3. There is no way for us to do - # any of this explicitly for python 2.7 before 2.7.9. 
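
The rewritten get_ssl_context (above and continuing below) reduces to a few stdlib calls. A minimal approximation with the same defaults; this is a sketch, not PyMongo's public API:

import ssl

def make_client_context(allow_invalid_certificates=False, allow_invalid_hostnames=False):
    # PROTOCOL_TLS_CLIENT negotiates the highest shared TLS version and
    # turns on certificate and hostname verification by default.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    if allow_invalid_certificates:
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    else:
        ctx.check_hostname = not allow_invalid_hostnames
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_default_certs()
    # Mirror the hardening flags from the hunk; these are harmless
    # no-ops where the underlying OpenSSL already disables them.
    ctx.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
    ctx.options |= getattr(ssl, "OP_NO_RENEGOTIATION", 0)
    return ctx

print(make_client_context().verify_mode)  # VerifyMode.CERT_REQUIRED
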
- ctx.options |= getattr(ssl, "OP_NO_SSLv2", 0) - ctx.options |= getattr(ssl, "OP_NO_SSLv3", 0) - # OpenSSL >= 1.0.0 - ctx.options |= getattr(ssl, "OP_NO_COMPRESSION", 0) - # Python 3.7+ with OpenSSL >= 1.1.0h - ctx.options |= getattr(ssl, "OP_NO_RENEGOTIATION", 0) + # and >= 3.3.4 and SSLv3 in >= 3.4.3. + ctx.options |= _ssl.OP_NO_SSLv2 + ctx.options |= _ssl.OP_NO_SSLv3 + ctx.options |= _ssl.OP_NO_COMPRESSION + ctx.options |= _ssl.OP_NO_RENEGOTIATION if certfile is not None: try: - if passphrase is not None: - vi = sys.version_info - # Since python just added a new parameter to an existing method - # this seems to be about the best we can do. - if (vi[0] == 2 and vi < (2, 7, 9) or - vi[0] == 3 and vi < (3, 3)): - raise ConfigurationError( - "Support for ssl_pem_passphrase requires " - "python 2.7.9+ (pypy 2.5.1+) or 3.3+") - ctx.load_cert_chain(certfile, keyfile, passphrase) - else: - ctx.load_cert_chain(certfile, keyfile) - except ssl.SSLError as exc: - raise ConfigurationError( - "Private key doesn't match certificate: %s" % (exc,)) + ctx.load_cert_chain(certfile, None, passphrase) + except _ssl.SSLError as exc: + raise ConfigurationError(f"Private key doesn't match certificate: {exc}") from None if crlfile is not None: - if not hasattr(ctx, "verify_flags"): - raise ConfigurationError( - "Support for ssl_crlfile requires " - "python 2.7.9+ (pypy 2.5.1+) or 3.4+") + if _ssl.IS_PYOPENSSL: + raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF + ctx.verify_flags = getattr( # type:ignore[attr-defined] + _ssl, "VERIFY_CRL_CHECK_LEAF", 0 + ) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) - elif cert_reqs != ssl.CERT_NONE: - # CPython >= 2.7.9 or >= 3.4.0, pypy >= 2.5.1 - if hasattr(ctx, "load_default_certs"): - ctx.load_default_certs() - # Python >= 3.2.0, useless on Windows. - elif (sys.platform != "win32" and - hasattr(ctx, "set_default_verify_paths")): - ctx.set_default_verify_paths() - elif sys.platform == "win32" and HAVE_WINCERTSTORE: - with _WINCERTSLOCK: - if _WINCERTS is None: - _load_wincerts() - ctx.load_verify_locations(_WINCERTS.name) - elif HAVE_CERTIFI: - ctx.load_verify_locations(certifi.where()) - else: - raise ConfigurationError( - "`ssl_cert_reqs` is not ssl.CERT_NONE and no system " - "CA certificates could be loaded. `ssl_ca_certs` is " - "required.") + elif verify_mode != CERT_NONE: + ctx.load_default_certs() ctx.verify_mode = verify_mode return ctx + else: - def validate_cert_reqs(option, dummy): - """No ssl module, raise ConfigurationError.""" - raise ConfigurationError("The value of %s is set but can't be " - "validated. The ssl module is not available" - % (option,)) - def validate_allow_invalid_certs(option, dummy): - """No ssl module, raise ConfigurationError.""" - return validate_cert_reqs(option, dummy) + class SSLError(Exception): # type: ignore + pass + + HAS_SNI = False + IPADDR_SAFE = False + BLOCKING_IO_ERRORS = () # type: ignore - def get_ssl_context(*dummy): + def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" raise ConfigurationError("The ssl module is not available.") diff --git a/pymongo/thread_util.py b/pymongo/thread_util.py deleted file mode 100644 index 3869ec322f..0000000000 --- a/pymongo/thread_util.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2012-2015 MongoDB, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for multi-threading support.""" - -import threading -try: - from time import monotonic as _time -except ImportError: - from time import time as _time - -from pymongo.monotonic import time as _time -from pymongo.errors import ExceededMaxWaiters - - -### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire -class Semaphore: - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - self._cond = threading.Condition(threading.Lock()) - self._value = value - - def acquire(self, blocking=True, timeout=None): - if not blocking and timeout is not None: - raise ValueError("can't specify timeout for non-blocking acquire") - rc = False - endtime = None - self._cond.acquire() - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value = self._value - 1 - rc = True - self._cond.release() - return rc - - __enter__ = acquire - - def release(self): - self._cond.acquire() - self._value = self._value + 1 - self._cond.notify() - self._cond.release() - - def __exit__(self, t, v, tb): - self.release() - - @property - def counter(self): - return self._value - - -class BoundedSemaphore(Semaphore): - """Semaphore that checks that # releases is <= # acquires""" - def __init__(self, value=1): - Semaphore.__init__(self, value) - self._initial_value = value - - def release(self): - if self._value >= self._initial_value: - raise ValueError("Semaphore released too many times") - return Semaphore.release(self) -### End backport from CPython 3.2 - - -class DummySemaphore(object): - def __init__(self, value=None): - pass - - def acquire(self, blocking=True, timeout=None): - return True - - def release(self): - pass - - -class MaxWaitersBoundedSemaphore(object): - def __init__(self, semaphore_class, value=1, max_waiters=1): - self.waiter_semaphore = semaphore_class(max_waiters) - self.semaphore = semaphore_class(value) - - def acquire(self, blocking=True, timeout=None): - if not self.waiter_semaphore.acquire(False): - raise ExceededMaxWaiters() - try: - return self.semaphore.acquire(blocking, timeout) - finally: - self.waiter_semaphore.release() - - def __getattr__(self, name): - return getattr(self.semaphore, name) - - -class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore): - def __init__(self, value=1, max_waiters=1): - MaxWaitersBoundedSemaphore.__init__( - self, BoundedSemaphore, value, max_waiters) - - -def create_semaphore(max_size, max_waiters): - if max_size is None: - return DummySemaphore() - else: - if max_waiters is None: - return BoundedSemaphore(max_size) - else: - return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters) diff --git a/pymongo/topology.py b/pymongo/topology.py index a3cfe1e79e..786be3ec93 
100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -14,39 +14,58 @@ """Internal class to monitor a topology of one or more servers.""" +from __future__ import annotations + import os +import queue import random -import threading +import time import warnings import weakref - -from bson.py3compat import itervalues, PY3 -if PY3: - import queue as Queue -else: - import Queue - -from pymongo import common -from pymongo import periodic_executor -from pymongo.pool import PoolOptions -from pymongo.topology_description import (updated_topology_description, - _updated_topology_description_srv_polling, - TopologyDescription, - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) -from pymongo.errors import ServerSelectionTimeoutError, ConfigurationError +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, common, helpers, periodic_executor +from pymongo.client_session import _ServerSession, _ServerSessionPool +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.hello import Hello +from pymongo.lock import _create_lock from pymongo.monitor import SrvMonitor -from pymongo.monotonic import time as _time +from pymongo.pool import Pool, PoolOptions from pymongo.server import Server -from pymongo.server_selectors import (any_server_selector, - arbiter_server_selector, - secondary_server_selector, - readable_server_selector, - writable_server_selector, - Selection) -from pymongo.client_session import _ServerSessionPool - - -def process_events_queue(queue_ref): +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + readable_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.settings import TopologySettings + from pymongo.typings import ClusterTime, _Address + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: q = queue_ref() if not q: return False # Cancel PeriodicExecutor. @@ -54,7 +73,7 @@ def process_events_queue(queue_ref): while True: try: event = q.get_nowait() - except Queue.Empty: + except queue.Empty: break else: fn, args = event @@ -63,25 +82,25 @@ def process_events_queue(queue_ref): return True # Continue PeriodicExecutor. -class Topology(object): +class Topology: """Monitor a topology of one or more servers.""" - def __init__(self, topology_settings): + + def __init__(self, topology_settings: TopologySettings): self._topology_id = topology_settings._topology_id - self._listeners = topology_settings._pool_options.event_listeners - pub = self._listeners is not None - self._publish_server = pub and self._listeners.enabled_for_server - self._publish_tp = pub and self._listeners.enabled_for_topology + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology # Create events queue if there are publishers. 
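
The weakref indirection in process_events_queue above is what lets the events thread shut itself down: the executor holds no strong reference to the queue, so once the Topology is freed the callback returns False and the executor stops. A self-contained sketch:

import queue
import weakref

def process_events_queue(queue_ref):
    q = queue_ref()
    if not q:
        return False  # Queue was garbage collected: stop polling.
    while True:
        try:
            fn, args = q.get_nowait()
        except queue.Empty:
            break
        fn(*args)
    return True  # Keep polling.

events = queue.Queue()
events.put((print, ("topology opened",)))
ref = weakref.ref(events)
print(process_events_queue(ref))  # drains the queue, returns True
del events
print(process_events_queue(ref))  # referent is gone, returns False
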
self._events = None - self.__events_executor = None + self.__events_executor: Any = None if self._publish_server or self._publish_tp: - self._events = Queue.Queue(maxsize=100) + self._events = queue.Queue(maxsize=100) if self._publish_tp: - self._events.put((self._listeners.publish_topology_opened, - (self._topology_id,))) + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) self._settings = topology_settings topology_description = TopologyDescription( topology_settings.get_topology_type(), @@ -89,40 +108,51 @@ def __init__(self, topology_settings): topology_settings.replica_set_name, None, None, - topology_settings) + topology_settings, + ) self._description = topology_description if self._publish_tp: - initial_td = TopologyDescription(TOPOLOGY_TYPE.Unknown, {}, None, - None, None, self._settings) - self._events.put(( - self._listeners.publish_topology_description_changed, - (initial_td, self._description, self._topology_id))) + assert self._events is not None + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) for seed in topology_settings.seeds: if self._publish_server: - self._events.put((self._listeners.publish_server_opened, - (seed, self._topology_id))) + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False - self._lock = threading.Lock() + self._closed = False + self._lock = _create_lock() self._condition = self._settings.condition_class(self._lock) - self._servers = {} - self._pid = None - self._max_cluster_time = None + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None self._session_pool = _ServerSessionPool() if self._publish_server or self._publish_tp: - def target(): + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue] + + def target() -> bool: return process_events_queue(weak) executor = periodic_executor.PeriodicExecutor( interval=common.EVENTS_QUEUE_FREQUENCY, - min_interval=0.5, + min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_events_thread") + name="pymongo_events_thread", + ) # We strongly reference the executor and it weakly references # the queue via this closure. When the topology is freed, stop @@ -132,10 +162,10 @@ def target(): executor.open() self._srv_monitor = None - if self._settings.fqdn is not None: + if self._settings.fqdn is not None and not self._settings.load_balanced: self._srv_monitor = SrvMonitor(self, self._settings) - def open(self): + def open(self) -> None: """Start monitoring, or restart after a fork. No effect if called multiple times. @@ -147,27 +177,42 @@ def open(self): forking. """ + pid = os.getpid() if self._pid is None: - self._pid = os.getpid() - else: - if os.getpid() != self._pid: - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "http://api.mongodb.org/python/current/faq.html#" - "is-pymongo-fork-safe") - with self._lock: - # Reset the session pool to avoid duplicate sessions in - # the child process. 
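
The open() hunk here (its mismatch branch continues just below) is the entire fork-detection mechanism: record os.getpid() at first use and treat any later mismatch as evidence of a fork. A POSIX-only sketch of the same check:

import os

_pid = None

def ensure_open():
    global _pid
    pid = os.getpid()
    if _pid is None:
        _pid = pid
    elif pid != _pid:
        # Running in a child process: reset per-process state here.
        _pid = pid
        print("fork detected in pid", pid)

ensure_open()
if os.fork() == 0:
    ensure_open()  # takes the mismatch branch in the child
    os._exit(0)
os.wait()
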
- self._session_pool.reset() + self._pid = pid + elif pid != self._pid: + self._pid = pid + warnings.warn( + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe", + stacklevel=2, + ) + with self._lock: + # Close servers and clear the pools. + for server in self._servers.values(): + server.close() + # Reset the session pool to avoid duplicate sessions in + # the child process. + self._session_pool.reset() with self._lock: self._ensure_opened() - def select_servers(self, - selector, - server_selection_timeout=None, - address=None): + def get_server_selection_timeout(self) -> float: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + return self._settings.server_selection_timeout + return timeout + + def select_servers( + self, + selector: Callable[[Selection], Selection], + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + ) -> list[Server]: """Return a list of Servers matching selector, or time out. :Parameters: @@ -184,29 +229,36 @@ def select_servers(self, `server_selection_timeout` if no matching servers are found. """ if server_selection_timeout is None: - server_timeout = self._settings.server_selection_timeout + server_timeout = self.get_server_selection_timeout() else: server_timeout = server_selection_timeout with self._lock: - server_descriptions = self._select_servers_loop( - selector, server_timeout, address) - - return [self.get_server_by_address(sd.address) - for sd in server_descriptions] - - def _select_servers_loop(self, selector, timeout, address): + server_descriptions = self._select_servers_loop(selector, server_timeout, address) + + return [ + cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions + ] + + def _select_servers_loop( + self, + selector: Callable[[Selection], Selection], + timeout: float, + address: Optional[_Address], + ) -> list[ServerDescription]: """select_servers() guts. Hold the lock when calling this.""" - now = _time() + now = time.monotonic() end_time = now + timeout server_descriptions = self._description.apply_selector( - selector, address, custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) while not server_descriptions: # No suitable servers. if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - self._error_message(selector)) + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) self._ensure_opened() self._request_check_all() @@ -217,25 +269,44 @@ def _select_servers_loop(self, selector, timeout, address): # held the lock until now. 
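
Note the switch from the old pymongo.monotonic shim to time.monotonic for the selection deadline: wall-clock time.time() can jump under NTP adjustment and corrupt a timeout. The deadline pattern in isolation:

import time

def wait_until(predicate, timeout, interval=0.05):
    # Compute the deadline once, with a clock that cannot go backwards.
    end = time.monotonic() + timeout
    while not predicate():
        if time.monotonic() > end:
            raise TimeoutError(f"condition not met within {timeout}s")
        time.sleep(min(interval, max(0.0, end - time.monotonic())))
    return True

start = time.monotonic()
print(wait_until(lambda: time.monotonic() - start > 0.1, timeout=1.0))
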
self._condition.wait(common.MIN_HEARTBEAT_INTERVAL) self._description.check_compatible() - now = _time() + now = time.monotonic() server_descriptions = self._description.apply_selector( - selector, address, - custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) self._description.check_compatible() return server_descriptions - def select_server(self, - selector, - server_selection_timeout=None, - address=None): + def _select_server( + self, + selector: Callable[[Selection], Selection], + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + ) -> Server: + servers = self.select_servers(selector, server_selection_timeout, address) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + def select_server( + self, + selector: Callable[[Selection], Selection], + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + ) -> Server: """Like select_servers, but choose a random server if several match.""" - return random.choice(self.select_servers(selector, - server_selection_timeout, - address)) - - def select_server_by_address(self, address, - server_selection_timeout=None): + server = self._select_server(selector, server_selection_timeout, address) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + return server + + def select_server_by_address( + self, address: _Address, server_selection_timeout: Optional[int] = None + ) -> Server: """Return a Server for "address", reconnecting if necessary. If the server's type is not known, request an immediate check of all @@ -253,51 +324,76 @@ def select_server_by_address(self, address, Raises exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found. """ - return self.select_server(any_server_selector, - server_selection_timeout, - address) + return self.select_server(any_server_selector, server_selection_timeout, address) - def _process_change(self, server_description): + def _process_change( + self, server_description: ServerDescription, reset_pool: bool = False + ) -> None: """Process a new ServerDescription on an opened topology. Hold the lock when calling this. """ td_old = self._description - if self._publish_server: - old_server_description = td_old._server_descriptions[ - server_description.address] - self._events.put(( - self._listeners.publish_server_description_changed, - (old_server_description, server_description, - server_description.address, self._topology_id))) - - self._description = updated_topology_description( - self._description, server_description) - + sd_old = td_old._server_descriptions[server_description.address] + if _is_stale_server_description(sd_old, server_description): + # This is a stale hello response. Ignore it. + return + + new_td = updated_topology_description(self._description, server_description) + # CMAP: Ensure the pool is "ready" when the server is selectable. 
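
The new _select_server above implements a "two random choices" strategy: when several servers are eligible, sample two and take the one with fewer in-flight operations. A sketch with a stand-in Server type:

import random
from dataclasses import dataclass

@dataclass
class Server:
    name: str
    operation_count: int

def select_server(servers):
    if len(servers) == 1:
        return servers[0]
    # Two random candidates; prefer the less loaded one.
    s1, s2 = random.sample(servers, 2)
    return s1 if s1.operation_count <= s2.operation_count else s2

pool = [Server("a", 3), Server("b", 0), Server("c", 7)]
print(select_server(pool).name)
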
+ if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): + server = self._servers.get(server_description.address) + if server: + server.pool.ready() + + suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description + if self._publish_server and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) + + self._description = new_td self._update_servers() self._receive_cluster_time_no_lock(server_description.cluster_time) - if self._publish_tp: - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) + if self._publish_tp and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) # Shutdown SRV polling for unsupported cluster types. # This is only applicable if the old topology was Unknown, and the # new one is something other than Unknown or Sharded. - if self._srv_monitor and (td_old.topology_type == TOPOLOGY_TYPE.Unknown - and self._description.topology_type not in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): self._srv_monitor.close() + # Clear the pool from a failed heartbeat. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + server.pool.reset() + # Wake waiters in select_servers(). self._condition.notify_all() - def on_change(self, server_description): - """Process a new ServerDescription after an ismaster call completes.""" + def on_change(self, server_description: ServerDescription, reset_pool: bool = False) -> None: + """Process a new ServerDescription after an hello call completes.""" # We do no I/O holding the lock. with self._lock: - # Monitors may continue working on ismaster calls for some time + # Monitors may continue working on hello calls for some time # after a call to Topology.close, so this method may be called at # any time. Ensure the topology is open before processing the # change. @@ -305,33 +401,37 @@ def on_change(self, server_description): # once. Check if it's still in the description or if some state- # change removed it. E.g., we got a host list from the primary # that didn't include this server. - if (self._opened and - self._description.has_server(server_description.address)): - self._process_change(server_description) + if self._opened and self._description.has_server(server_description.address): + self._process_change(server_description, reset_pool) - def _process_srv_update(self, seedlist): + def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. Hold the lock when calling this. 
""" td_old = self._description - self._description = _updated_topology_description_srv_polling( - self._description, seedlist) + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) self._update_servers() if self._publish_tp: - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) - - def on_srv_update(self, seedlist): + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + + def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new list of nodes obtained from scanning SRV records.""" # We do no I/O holding the lock. with self._lock: if self._opened: self._process_srv_update(seedlist) - def get_server_by_address(self, address): + def get_server_by_address(self, address: _Address) -> Optional[Server]: """Get a Server or None. Returns the current version of the server immediately, even if it's @@ -341,10 +441,10 @@ def get_server_by_address(self, address): """ return self._servers.get(address) - def has_server(self, address): + def has_server(self, address: _Address) -> bool: return address in self._servers - def get_primary(self): + def get_primary(self) -> Optional[_Address]: """Return primary's address or None.""" # Implemented here in Topology instead of MongoClient, so it can lock. with self._lock: @@ -354,30 +454,32 @@ def get_primary(self): return writable_server_selector(self._new_selection())[0].address - def _get_replica_set_members(self, selector): + def _get_replica_set_members(self, selector: Callable[[Selection], Selection]) -> set[_Address]: """Return set of replica set member addresses.""" # Implemented here in Topology instead of MongoClient, so it can lock. with self._lock: topology_type = self._description.topology_type - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary): + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): return set() - return set([sd.address for sd in selector(self._new_selection())]) + return {sd.address for sd in iter(selector(self._new_selection()))} - def get_secondaries(self): + def get_secondaries(self) -> set[_Address]: """Return set of secondary addresses.""" return self._get_replica_set_members(secondary_server_selector) - def get_arbiters(self): + def get_arbiters(self) -> set[_Address]: """Return set of arbiter addresses.""" return self._get_replica_set_members(arbiter_server_selector) - def max_cluster_time(self): + def max_cluster_time(self) -> Optional[ClusterTime]: """Return a document, the highest seen $clusterTime.""" return self._max_cluster_time - def _receive_cluster_time_no_lock(self, cluster_time): + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: # Driver Sessions Spec: "Whenever a driver receives a cluster time from # a server it MUST compare it to the current highest seen cluster time # for the deployment. If the new cluster time is higher than the @@ -386,59 +488,53 @@ def _receive_cluster_time_no_lock(self, cluster_time): # value of the clusterTime embedded field." if cluster_time: # ">" uses bson.timestamp.Timestamp's comparison operator. 
- if (not self._max_cluster_time - or cluster_time['clusterTime'] > - self._max_cluster_time['clusterTime']): + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): self._max_cluster_time = cluster_time - def receive_cluster_time(self, cluster_time): + def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: with self._lock: self._receive_cluster_time_no_lock(cluster_time) - def request_check_all(self, wait_time=5): + def request_check_all(self, wait_time: int = 5) -> None: """Wake all monitors, wait for at least one to check its server.""" with self._lock: self._request_check_all() self._condition.wait(wait_time) - def reset_pool(self, address): - with self._lock: - server = self._servers.get(address) - if server: - server.pool.reset() - - def reset_server(self, address): - """Clear our pool for a server and mark it Unknown. + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. - Do *not* request an immediate check. + This includes any server that might be selected for an operation. """ - with self._lock: - self._reset_server(address, reset_pool=True) + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers - def reset_server_and_request_check(self, address): - """Clear our pool for a server, mark it Unknown, and check it soon.""" - with self._lock: - self._reset_server(address, reset_pool=True) - self._request_check(address) - - def mark_server_unknown_and_request_check(self, address): - """Mark a server Unknown, and check it soon.""" - with self._lock: - self._reset_server(address, reset_pool=False) - self._request_check(address) - - def update_pool(self): + def update_pool(self) -> None: # Remove any stale sockets and add new sockets if pool is too small. servers = [] with self._lock: - for server in self._servers.values(): - servers.append((server, server._pool.pool_id)) - - for server, pool_id in servers: - server._pool.remove_stale_sockets(pool_id) - - def close(self): - """Clear pools and terminate monitors. Topology reopens on demand.""" + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + self.handle_error(server.description.address, ctx) + raise + + def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ with self._lock: for server in self._servers.values(): server.close() @@ -454,72 +550,82 @@ def close(self): self._srv_monitor.close() self._opened = False + self._closed = True # Publish only after releasing the lock. 
if self._publish_tp: - self._events.put((self._listeners.publish_topology_closed, - (self._topology_id,))) + assert self._events is not None + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) if self._publish_server or self._publish_tp: self.__events_executor.close() @property - def description(self): + def description(self) -> TopologyDescription: return self._description - def pop_all_sessions(self): + def pop_all_sessions(self) -> list[_ServerSession]: """Pop all session ids from the pool.""" with self._lock: return self._session_pool.pop_all() - def get_server_session(self): - """Start or resume a server session, or raise ConfigurationError.""" + def _check_implicit_session_support(self) -> None: with self._lock: - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - # Maybe we need an initial scan? Can raise ServerSelectionError. - if self._description.topology_type == TOPOLOGY_TYPE.Single: - if not self._description.has_known_servers: - self._select_servers_loop( - any_server_selector, - self._settings.server_selection_timeout, - None) - elif not self._description.readable_servers: + self._check_session_support() + + def _check_session_support(self) -> float: + """Internal check for session support on clusters.""" + if self._settings.load_balanced: + # Sessions never time out in load balanced mode. + return float("inf") + session_timeout = self._description.logical_session_timeout_minutes + if session_timeout is None: + # Maybe we need an initial scan? Can raise ServerSelectionError. + if self._description.topology_type == TOPOLOGY_TYPE.Single: + if not self._description.has_known_servers: self._select_servers_loop( - readable_server_selector, - self._settings.server_selection_timeout, - None) + any_server_selector, self.get_server_selection_timeout(), None + ) + elif not self._description.readable_servers: + self._select_servers_loop( + readable_server_selector, self.get_server_selection_timeout(), None + ) session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: - raise ConfigurationError( - "Sessions are not supported by this MongoDB deployment") + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return session_timeout + def get_server_session(self) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + with self._lock: + session_timeout = self._check_session_support() return self._session_pool.get_server_session(session_timeout) - def return_server_session(self, server_session, lock): + def return_server_session(self, server_session: _ServerSession, lock: bool) -> None: if lock: with self._lock: - session_timeout = \ - self._description.logical_session_timeout_minutes - if session_timeout is not None: - self._session_pool.return_server_session(server_session, - session_timeout) + self._session_pool.return_server_session( + server_session, self._description.logical_session_timeout_minutes + ) else: # Called from a __del__ method, can't use a lock. self._session_pool.return_server_session_no_lock(server_session) - def _new_selection(self): + def _new_selection(self) -> Selection: """A Selection object, initially including all known servers. Hold the lock when calling this. """ return Selection.from_topology_description(self._description) - def _ensure_opened(self): + def _ensure_opened(self) -> None: """Start monitors, or restart after a fork. Hold the lock when calling this. 
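
_check_session_support above reduces to a small decision: load-balanced topologies always support sessions (and those sessions never time out), otherwise the deployment must advertise logicalSessionTimeoutMinutes. Schematically (a sketch; the real code raises ConfigurationError and may trigger a server scan first):

def check_session_support(load_balanced, logical_session_timeout_minutes):
    if load_balanced:
        # Sessions never time out in load balanced mode.
        return float("inf")
    if logical_session_timeout_minutes is None:
        raise ValueError("Sessions are not supported by this MongoDB deployment")
    return logical_session_timeout_minutes

print(check_session_support(True, None))  # inf
print(check_session_support(False, 30))   # 30
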
""" + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + if not self._opened: self._opened = True self._update_servers() @@ -529,44 +635,123 @@ def _ensure_opened(self): self.__events_executor.open() # Start the SRV polling thread. - if self._srv_monitor and (self.description.topology_type in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): self._srv_monitor.open() + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + # Ensure that the monitors are open. - for server in itervalues(self._servers): + for server in self._servers.values(): server.open() - def _reset_server(self, address, reset_pool): - """Mark a server Unknown and optionally reset it's pool. - - Hold the lock when calling this. Does *not* request an immediate check. - """ + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. + return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. 
+ default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + elif isinstance(error, ConnectionFailure): + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. + + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. + """ + with self._lock: + self._handle_error(address, err_ctx) - # "server" is None if another thread removed it from the topology. - if server: - if reset_pool: - server.reset() - - # Mark this server Unknown. - self._description = self._description.reset_server(address) - self._update_servers() - - def _request_check(self, address): - """Wake one monitor. Hold the lock when calling this.""" - server = self._servers.get(address) - - # "server" is None if another thread removed it from the topology. - if server: - server.request_check() - - def _request_check_all(self): + def _request_check_all(self) -> None: """Wake all monitors. Hold the lock when calling this.""" for server in self._servers.values(): server.request_check() - def _update_servers(self): + def _update_servers(self) -> None: """Sync our Servers from TopologyDescription.server_descriptions. Hold the lock while calling this. @@ -577,10 +762,11 @@ def _update_servers(self): server_description=sd, topology=self, pool=self._create_pool_for_monitor(address), - topology_settings=self._settings) + topology_settings=self._settings, + ) weak = None - if self._publish_server: + if self._publish_server and self._events is not None: weak = weakref.ref(self._events) server = Server( server_description=sd, @@ -588,7 +774,8 @@ def _update_servers(self): monitor=monitor, topology_id=self._topology_id, listeners=self._listeners, - events=weak) + events=weak, + ) self._servers[address] = server server.open() @@ -599,18 +786,17 @@ def _update_servers(self): self._servers[address].description = sd # Update is_writable value of the pool, if it changed. 
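# Hedged sketch of the "not primary" branch above: decide whether the pool
# must be cleared from the error code and the connection's max wire version.
# The code sets below are a small illustrative subset of the frozensets in
# pymongo.helpers, not the full lists.
NOT_PRIMARY_CODES = {10107, 13435, 13436, 189, 91, 11600}
SHUTDOWN_CODES = {91, 11600}  # ShutdownInProgress, InterruptedAtShutdown

def should_clear_pool(err_code: int, max_wire_version: int) -> bool:
    if err_code not in NOT_PRIMARY_CODES:
        return False
    is_shutting_down = err_code in SHUTDOWN_CODES
    # Servers older than 4.2 (wire version <= 7) always get the pool cleared.
    return is_shutting_down or max_wire_version <= 7

assert should_clear_pool(91, 9) is True       # shutdown error: always clear
assert should_clear_pool(10107, 9) is False   # modern server: keep the pool
assert should_clear_pool(10107, 7) is True    # pre-4.2 server: clear it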
if was_writable != sd.is_writable: - self._servers[address].pool.update_is_writable( - sd.is_writable) + self._servers[address].pool.update_is_writable(sd.is_writable) for address, server in list(self._servers.items()): if not self._description.has_server(address): server.close() self._servers.pop(address) - def _create_pool_for_server(self, address): + def _create_pool_for_server(self, address: _Address) -> Pool: return self._settings.pool_class(address, self._settings.pool_options) - def _create_pool_for_monitor(self, address): + def _create_pool_for_monitor(self, address: _Address) -> Pool: options = self._settings.pool_options # According to the Server Discovery And Monitoring Spec, monitors use @@ -619,50 +805,55 @@ def _create_pool_for_monitor(self, address): monitor_pool_options = PoolOptions( connect_timeout=options.connect_timeout, socket_timeout=options.connect_timeout, - ssl_context=options.ssl_context, - ssl_match_hostname=options.ssl_match_hostname, - event_listeners=options.event_listeners, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, appname=options.appname, - driver=options.driver) + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) - return self._settings.pool_class(address, monitor_pool_options, - handshake=False) + return self._settings.pool_class(address, monitor_pool_options, handshake=False) - def _error_message(self, selector): + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: """Format an error message if server selection fails. Hold the lock when calling this. """ is_replica_set = self._description.topology_type in ( TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) if is_replica_set: - server_plural = 'replica set members' + server_plural = "replica set members" elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: - server_plural = 'mongoses' + server_plural = "mongoses" else: - server_plural = 'servers' + server_plural = "servers" if self._description.known_servers: # We've connected, but no servers match the selector. if selector is writable_server_selector: if is_replica_set: - return 'No primary available for writes' + return "No primary available for writes" else: - return 'No %s available for writes' % server_plural + return "No %s available for writes" % server_plural else: - return 'No %s match selector "%s"' % (server_plural, selector) + return f'No {server_plural} match selector "{selector}"' else: addresses = list(self._description.server_descriptions()) servers = list(self._description.server_descriptions().values()) if not servers: if is_replica_set: # We removed all servers because of the wrong setName? - return 'No %s available for replica set name "%s"' % ( - server_plural, self._settings.replica_set_name) + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) else: - return 'No %s available' % server_plural + return "No %s available" % server_plural # 1 or more servers, all Unknown. Are they unknown for one reason? error = servers[0].error @@ -670,17 +861,73 @@ def _error_message(self, selector): if same: if error is None: # We're still discovering. 
- return 'No %s found yet' % server_plural + return "No %s found yet" % server_plural - if (is_replica_set and not - set(addresses).intersection(self._seed_addresses)): + if is_replica_set and not set(addresses).intersection(self._seed_addresses): # We replaced our seeds with new hosts but can't reach any. return ( - 'Could not reach any servers in %s. Replica set is' - ' configured with internal hostnames or IPs?' % - addresses) + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) return str(error) else: - return ','.join(str(server.error) for server in servers - if server.error) + return ",".join(str(server.error) for server in servers if server.error) + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for MongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3b912f8de..141f74edf3 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -13,34 +13,60 @@ # permissions and limitations under the License. 
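# Usage sketch for the two topologyVersion helpers defined at the end of
# pymongo/topology.py above. A topologyVersion is a document of the form
# {"processId": ObjectId, "counter": int}; values here are illustrative.
from bson.objectid import ObjectId

pid = ObjectId()
current = {"processId": pid, "counter": 3}

# An error stamped by the same process with an older counter is stale:
assert _is_stale_error_topology_version(current, {"processId": pid, "counter": 2})
# A different processId means the server restarted; nothing is stale:
assert not _is_stale_error_topology_version(current, {"processId": ObjectId(), "counter": 9})
# Equal counters are stale for errors (>=) but not for server descriptions (>):
assert _is_stale_error_topology_version(current, {"processId": pid, "counter": 3})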
"""Represent a deployment of MongoDB servers.""" - -from collections import namedtuple - +from __future__ import annotations + +from random import sample +from typing import ( + Any, + Callable, + List, + Mapping, + MutableMapping, + NamedTuple, + Optional, + cast, +) + +from bson.min_key import MinKey +from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address # Enumeration for various kinds of MongoDB cluster topologies. -TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary', - 'ReplicaSetWithPrimary', 'Sharded', - 'Unknown'])(*range(5)) +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) # Topologies compatible with SRV record polling. -SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) +SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] -class TopologyDescription(object): - def __init__(self, - topology_type, - server_descriptions, - replica_set_name, - max_set_version, - max_election_id, - topology_settings): + +class TopologyDescription: + def __init__( + self, + topology_type: int, + server_descriptions: dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: """Representation of a deployment of MongoDB servers. :Parameters: @@ -63,7 +89,28 @@ def __init__(self, # Is PyMongo compatible with all servers' wire protocols? self._incompatible_err = None + if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: + self._init_incompatible_err() + + # Server Discovery And Monitoring Spec: Whenever a client updates the + # TopologyDescription from an hello response, it MUST set + # TopologyDescription.logicalSessionTimeoutMinutes to the smallest + # logicalSessionTimeoutMinutes value among ServerDescriptions of all + # data-bearing server types. If any have a null + # logicalSessionTimeoutMinutes, then + # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + readable_servers = self.readable_servers + if not readable_servers: + self._ls_timeout_minutes = None + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): + self._ls_timeout_minutes = None + else: + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) + def _init_incompatible_err(self) -> None: + """Internal compatibility check for non-load balanced topologies.""" for s in self._server_descriptions.values(): if not s.is_server_type_known: continue @@ -73,49 +120,43 @@ def __init__(self, server_too_new = ( # Server too new. s.min_wire_version is not None - and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION) + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) server_too_old = ( # Server too old. 
s.max_wire_version is not None - and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION) + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) if server_too_new: self._incompatible_err = ( - "Server at %s:%d requires wire version %d, but this " + "Server at %s:%d requires wire version %d, but this " # type: ignore "version of PyMongo only supports up to %d." - % (s.address[0], s.address[1], - s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) elif server_too_old: self._incompatible_err = ( - "Server at %s:%d reports wire version %d, but this " + "Server at %s:%d reports wire version %d, but this " # type: ignore "version of PyMongo requires at least %d (MongoDB %s)." - % (s.address[0], s.address[1], - s.max_wire_version, - common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) break - # Server Discovery And Monitoring Spec: Whenever a client updates the - # TopologyDescription from an ismaster response, it MUST set - # TopologyDescription.logicalSessionTimeoutMinutes to the smallest - # logicalSessionTimeoutMinutes value among ServerDescriptions of all - # data-bearing server types. If any have a null - # logicalSessionTimeoutMinutes, then - # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. - readable_servers = self.readable_servers - if not readable_servers: - self._ls_timeout_minutes = None - elif any(s.logical_session_timeout_minutes is None - for s in readable_servers): - self._ls_timeout_minutes = None - else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes - for s in readable_servers) - - def check_compatible(self): + def check_compatible(self) -> None: """Raise ConfigurationError if any server is incompatible. A server is incompatible if its wire protocol version range does not @@ -124,14 +165,15 @@ def check_compatible(self): if self._incompatible_err: raise ConfigurationError(self._incompatible_err) - def has_server(self, address): + def has_server(self, address: _Address) -> bool: return address in self._server_descriptions - def reset_server(self, address): + def reset_server(self, address: _Address) -> TopologyDescription: """A copy of this description, with one server marked Unknown.""" - return updated_topology_description(self, ServerDescription(address)) + unknown_sd = self._server_descriptions[address].to_unknown() + return updated_topology_description(self, unknown_sd) - def reset(self): + def reset(self) -> TopologyDescription: """A copy of this description, with all servers marked Unknown.""" if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary @@ -139,8 +181,7 @@ def reset(self): topology_type = self._topology_type # The default ServerDescription's type is Unknown. 
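# Hedged sketch of the compatibility rule above: driver and server are
# compatible when their wire-version ranges overlap. The driver bounds below
# are illustrative placeholders, not PyMongo's real constants.
def is_compatible(server_min: int, server_max: int,
                  driver_min: int = 6, driver_max: int = 21) -> bool:
    server_too_new = server_min > driver_max  # server requires more than we speak
    server_too_old = server_max < driver_min  # server cannot speak our minimum
    return not (server_too_new or server_too_old)

assert is_compatible(0, 13)        # ranges overlap
assert not is_compatible(22, 25)   # server requires a newer driver
assert not is_compatible(0, 5)     # server predates the driver's minimum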
- sds = dict((address, ServerDescription(address)) - for address in self._server_descriptions) + sds = {address: ServerDescription(address) for address in self._server_descriptions} return TopologyDescription( topology_type, @@ -148,20 +189,22 @@ def reset(self): self._replica_set_name, self._max_set_version, self._max_election_id, - self._topology_settings) + self._topology_settings, + ) - def server_descriptions(self): - """Dict of (address, - :class:`~pymongo.server_description.ServerDescription`).""" + def server_descriptions(self) -> dict[_Address, ServerDescription]: + """dict of (address, + :class:`~pymongo.server_description.ServerDescription`). + """ return self._server_descriptions.copy() @property - def topology_type(self): + def topology_type(self) -> int: """The type of this topology.""" return self._topology_type @property - def topology_type_name(self): + def topology_type_name(self) -> str: """The topology type as a human readable string. .. versionadded:: 3.4 @@ -169,44 +212,42 @@ def topology_type_name(self): return TOPOLOGY_TYPE._fields[self._topology_type] @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """The replica set name.""" return self._replica_set_name @property - def max_set_version(self): + def max_set_version(self) -> Optional[int]: """Greatest setVersion seen from a primary, or None.""" return self._max_set_version @property - def max_election_id(self): + def max_election_id(self) -> Optional[ObjectId]: """Greatest electionId seen from a primary, or None.""" return self._max_election_id @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: """Minimum logical session timeout, or None.""" return self._ls_timeout_minutes @property - def known_servers(self): + def known_servers(self) -> list[ServerDescription]: """List of Servers of types besides Unknown.""" - return [s for s in self._server_descriptions.values() - if s.is_server_type_known] + return [s for s in self._server_descriptions.values() if s.is_server_type_known] @property - def has_known_servers(self): + def has_known_servers(self) -> bool: """Whether there are any Servers of types besides Unknown.""" - return any(s for s in self._server_descriptions.values() - if s.is_server_type_known) + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property - def readable_servers(self): + def readable_servers(self) -> list[ServerDescription]: """List of readable Servers.""" return [s for s in self._server_descriptions.values() if s.is_readable] @property - def common_wire_version(self): + def common_wire_version(self) -> Optional[int]: """Minimum of all servers' max wire versions, or None.""" servers = self.known_servers if servers: @@ -215,53 +256,80 @@ def common_wire_version(self): return None @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._topology_settings.heartbeat_frequency - def apply_selector(self, selector, address, custom_selector=None): - - def apply_local_threshold(selection): - if not selection: - return [] - - settings = self._topology_settings + @property + def srv_max_hosts(self) -> int: + return self._topology_settings._srv_max_hosts + + def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: + if not selection: + return [] + # Round trip time in seconds. 
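# Worked example of the localThresholdMS window used by _apply_local_threshold
# here: keep every server whose average RTT lies within the threshold of the
# fastest member. Names and numbers are illustrative.
rtts = {"a": 0.012, "b": 0.015, "c": 0.040}  # seconds
threshold = 0.015                            # localThresholdMS=15
fastest = min(rtts.values())
within = sorted(name for name, rtt in rtts.items() if rtt - fastest <= threshold)
assert within == ["a", "b"]  # "c" is 28ms slower than the fastest, so excluded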
+ fastest = min(cast(float, s.round_trip_time) for s in selection.server_descriptions) + threshold = self._topology_settings.local_threshold_ms / 1000.0 + return [ + s + for s in selection.server_descriptions + if (cast(float, s.round_trip_time) - fastest) <= threshold + ] + + def apply_selector( + self, + selector: Any, + address: Optional[_Address] = None, + custom_selector: Optional[_ServerSelector] = None, + ) -> list[ServerDescription]: + """List of servers matching the provided selector(s). - # Round trip time in seconds. - fastest = min( - s.round_trip_time for s in selection.server_descriptions) - threshold = settings.local_threshold_ms / 1000.0 - return [s for s in selection.server_descriptions - if (s.round_trip_time - fastest) <= threshold] + :Parameters: + - `selector`: a callable that takes a Selection as input and returns + a Selection as output. For example, an instance of a read + preference from :mod:`~pymongo.read_preferences`. + - `address` (optional): A server address to select. + - `custom_selector` (optional): A callable that augments server + selection rules. Accepts a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. - if getattr(selector, 'min_wire_version', 0): + .. versionadded:: 3.4 + """ + if getattr(selector, "min_wire_version", 0): common_wv = self.common_wire_version if common_wv and common_wv < selector.min_wire_version: raise ConfigurationError( "%s requires min wire version %d, but topology's min" - " wire version is %d" % (selector, - selector.min_wire_version, - common_wv)) + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) + + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) - if self.topology_type == TOPOLOGY_TYPE.Single: - # Ignore selectors for standalone. + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): + # Ignore selectors for standalone and load balancer mode. return self.known_servers - elif address: + if address: # Ignore selectors when explicit address is requested. description = self.server_descriptions().get(address) return [description] if description else [] - elif self.topology_type == TOPOLOGY_TYPE.Sharded: - # Ignore read preference. - selection = Selection.from_topology_description(self) - else: - selection = selector(Selection.from_topology_description(self)) + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) # Apply custom selector followed by localThresholdMS. if custom_selector is not None and selection: selection = selection.with_server_descriptions( - custom_selector(selection.server_descriptions)) - return apply_local_threshold(selection) + custom_selector(selection.server_descriptions) + ) + return self._apply_local_threshold(selection) - def has_readable_server(self, read_preference=ReadPreference.PRIMARY): + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: """Does this topology have any readable servers available matching the given read preference? @@ -276,9 +344,9 @@ def has_readable_server(self, read_preference=ReadPreference.PRIMARY): .. 
versionadded:: 3.4 """ common.validate_read_preference("read_preference", read_preference) - return any(self.apply_selector(read_preference, None)) + return any(self.apply_selector(read_preference)) - def has_writable_server(self): + def has_writable_server(self) -> bool: """Does this topology have a writable server available? .. note:: When connected directly to a single server this method @@ -288,8 +356,18 @@ def has_writable_server(self): """ return self.has_readable_server(ReadPreference.PRIMARY) + def __repr__(self) -> str: + # Sort the servers by address. + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) + -# If topology type is Unknown and we receive an ismaster response, what should +# If topology type is Unknown and we receive a hello response, what should # the new topology type be? _SERVER_TYPE_TO_TOPOLOGY_TYPE = { SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, @@ -297,18 +375,21 @@ def has_writable_server(self): SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. } -def updated_topology_description(topology_description, server_description): +def updated_topology_description( + topology_description: TopologyDescription, server_description: ServerDescription +) -> TopologyDescription: """Return an updated copy of a TopologyDescription. :Parameters: - `topology_description`: the current TopologyDescription - `server_description`: a new ServerDescription that resulted from - an ismaster call + a hello call - Called after attempting (successfully or not) to call ismaster on the + Called after attempting (successfully or not) to call hello on the server at server_description.address. Does not modify topology_description. """ address = server_description.address @@ -328,6 +409,15 @@ def updated_topology_description(topology_description, server_description): sds[address] = server_description if topology_type == TOPOLOGY_TYPE.Single: + # Set server type to Unknown if replica set name does not match. + if set_name is not None and set_name != server_description.replica_set_name: + error = ConfigurationError( + "client is configured to connect to a replica set named " + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) + ) + sds[address] = server_description.to_unknown(error=error) # Single type never changes. return TopologyDescription( TOPOLOGY_TYPE.Single, @@ -335,12 +425,16 @@ def updated_topology_description(topology_description, server_description): set_name, max_set_version, max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) if topology_type == TOPOLOGY_TYPE.Unknown: - if server_type == SERVER_TYPE.Standalone: - sds.pop(address) - + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): + if len(topology_description._topology_settings.seeds) == 1: + topology_type = TOPOLOGY_TYPE.Single + else: + # Remove standalone from Topology when given multiple seeds. 
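# Sketch of the two branches of updated_topology_description here, with
# illustrative inputs: in Single mode a replica-set-name mismatch marks the
# server Unknown, and in Unknown mode a standalone is only kept (promoting
# the topology to Single) when it was the lone seed.
from typing import Optional

def single_keeps_server(configured: Optional[str], reported: Optional[str]) -> bool:
    return configured is None or configured == reported

assert single_keeps_server("rs0", "rs0")
assert not single_keeps_server("rs0", "rs1")  # marked Unknown with an error

def unknown_sees_standalone(num_seeds: int) -> str:
    return "Single" if num_seeds == 1 else "standalone removed"

assert unknown_sees_standalone(1) == "Single"
assert unknown_sees_standalone(3) == "standalone removed"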
+ sds.pop(address) elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] @@ -353,21 +447,14 @@ def updated_topology_description(topology_description, server_description): sds.pop(address) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): topology_type, set_name = _update_rs_no_primary_from_member( - sds, set_name, server_description) + sds, set_name, server_description + ) elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): @@ -375,43 +462,39 @@ def updated_topology_description(topology_description, server_description): topology_type = _check_has_primary(sds) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): - topology_type = _update_rs_with_primary_from_member( - sds, set_name, server_description) + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) else: # Server type is Unknown or RSGhost: did we just lose the primary? topology_type = _check_has_primary(sds) # Return updated copy. - return TopologyDescription(topology_type, - sds, - set_name, - max_set_version, - max_election_id, - topology_description._topology_settings) + return TopologyDescription( + topology_type, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) -def _updated_topology_description_srv_polling(topology_description, seedlist): +def _updated_topology_description_srv_polling( + topology_description: TopologyDescription, seedlist: list[tuple[str, Any]] +) -> TopologyDescription: """Return an updated copy of a TopologyDescription. :Parameters: - `topology_description`: the current TopologyDescription - `seedlist`: a list of new seeds that resulted from - an ismaster call + a hello call """ + assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES # Create a copy of the server descriptions. sds = topology_description.server_descriptions() @@ -419,32 +502,40 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): if set(sds.keys()) == set(seedlist): return topology_description - # Add SDs corresponding to servers recently added to the SRV record. - for address in seedlist: - if address not in sds: - sds[address] = ServerDescription(address) - # Remove SDs corresponding to servers no longer part of the SRV record.
for address in list(sds.keys()): if address not in seedlist: sds.pop(address) + if topology_description.srv_max_hosts != 0: + new_hosts = set(seedlist) - set(sds.keys()) + n_to_add = topology_description.srv_max_hosts - len(sds) + if n_to_add > 0: + seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) + else: + seedlist = [] + # Add SDs corresponding to servers recently added to the SRV record. + for address in seedlist: + if address not in sds: + sds[address] = ServerDescription(address) return TopologyDescription( topology_description.topology_type, sds, topology_description.replica_set_name, topology_description.max_set_version, topology_description.max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) def _update_rs_from_primary( - sds, - replica_set_name, - server_description, - max_set_version, - max_election_id): - """Update topology description from a primary's ismaster response. + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], +) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: + """Update topology description from a primary's hello response. Pass in a dict of ServerDescriptions, current replica set name, the ServerDescription we are processing, and the TopologyDescription's @@ -460,39 +551,44 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) - - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if (None not in max_election_tuple and - max_election_tuple > server_description.election_tuple): - + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) + max_election_tuple: tuple = (max_set_version, max_election_id) + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: # Stale primary, set to type Unknown. 
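# Worked example of the (electionId, setVersion) ordering used above for
# servers with maxWireVersion >= 17 (MongoDB 6.0+): None compares as MinKey,
# so any real value wins against a missing one.
from bson.min_key import MinKey
from bson.objectid import ObjectId

def election_key(election_id, set_version):
    return tuple(MinKey() if v is None else v for v in (election_id, set_version))

previous = election_key(None, 5)
incoming = election_key(ObjectId(), 5)
assert previous < incoming  # the primary that reports an electionId is newer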
- address = server_description.address - sds[address] = ServerDescription(address) - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) - - max_election_id = server_description.election_id - - if (server_description.set_version is not None and - (max_set_version is None or - server_description.set_version > max_set_version)): - - max_set_version = server_description.set_version + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? for server in sds.values(): - if (server.server_type is SERVER_TYPE.RSPrimary - and server.address != server_description.address): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): # Reset old primary's type to Unknown. - sds[server.address] = ServerDescription(server.address) + sds[server.address] = server.to_unknown() # There can be only one prior primary. break @@ -508,16 +604,14 @@ def _update_rs_from_primary( # If the host list differs from the seed list, we may not have a primary # after all. - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) def _update_rs_with_primary_from_member( - sds, - replica_set_name, - server_description): + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> int: """RS with known primary. Process a response from a non-primary. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -529,8 +623,7 @@ def _update_rs_with_primary_from_member( if replica_set_name != server_description.replica_set_name: sds.pop(server_description.address) - elif (server_description.me and - server_description.address != server_description.me): + elif server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) # Had this member been the primary? @@ -538,9 +631,10 @@ def _update_rs_with_primary_from_member( def _update_rs_no_primary_from_member( - sds, - replica_set_name, - server_description): + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> tuple[int, Optional[str]]: """RS without known primary. Update from a non-primary's response. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -562,14 +656,13 @@ def _update_rs_no_primary_from_member( if address not in sds: sds[address] = ServerDescription(address) - if (server_description.me and - server_description.address != server_description.me): + if server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) return topology_type, replica_set_name -def _check_has_primary(sds): +def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: """Current topology type is ReplicaSetWithPrimary. Is primary still known? Pass in a dict of ServerDescriptions. 
@@ -579,5 +672,5 @@ def _check_has_primary(sds): for s in sds.values(): if s.server_type == SERVER_TYPE.RSPrimary: return TOPOLOGY_TYPE.ReplicaSetWithPrimary - else: + else: # noqa: PLW0120 return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/typings.py b/pymongo/typings.py new file mode 100644 index 0000000000..174a0e3614 --- /dev/null +++ b/pymongo/typings.py @@ -0,0 +1,60 @@ +# Copyright 2022-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by PyMongo""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +from bson.typings import _DocumentOut, _DocumentType, _DocumentTypeArg + +if TYPE_CHECKING: + from pymongo.collation import Collation + + +# Common Shared Types. +_Address = Tuple[str, Optional[int]] +_CollationIn = Union[Mapping[str, Any], "Collation"] +_Pipeline = Sequence[Mapping[str, Any]] +ClusterTime = Mapping[str, Any] + +_T = TypeVar("_T") + + +def strip_optional(elem: Optional[_T]) -> _T: + """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T + while inside a list comprehension. + """ + assert elem is not None + return elem + + +__all__ = [ + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", +] diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 82e69b163b..d5292c1b54 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -14,59 +14,90 @@ """Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + import re +import sys import warnings - -from bson.py3compat import string_type, PY3 - -if PY3: - from urllib.parse import unquote_plus -else: - from urllib import unquote_plus - +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.client_options import _parse_ssl_options from pymongo.common import ( - get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, - URI_OPTIONS_DEPRECATION_MAP, _CaseInsensitiveDictionary) + INTERNAL_URI_OPTION_NAME_MAP, + SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver +from pymongo.typings import _Address +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext -SCHEME = 'mongodb://' +SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) -SRV_SCHEME = 'mongodb+srv://' +SRV_SCHEME = "mongodb+srv://" SRV_SCHEME_LEN = len(SRV_SCHEME) DEFAULT_PORT = 27017 -def parse_userinfo(userinfo): +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :Parameters: + - `s`: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. 
+ """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: """Validates the format of user information in a MongoDB URI. - Reserved characters like ':', '/', '+' and '@' must be escaped - following RFC 3986. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. Returns a 2-tuple containing the unescaped username followed by the unescaped password. - :Paramaters: + :Parameters: - `userinfo`: A string of the form : - - .. versionchanged:: 2.2 - Now uses `urllib.unquote_plus` so `+` characters must be escaped. """ - if '@' in userinfo or userinfo.count(':') > 1: - if PY3: - quote_fn = "urllib.parse.quote_plus" - else: - quote_fn = "urllib.quote_plus" - raise InvalidURI("Username and password must be escaped according to " - "RFC 3986, use %s()." % quote_fn) + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: raise InvalidURI("The empty string is not valid username.") + return unquote_plus(user), unquote_plus(passwd) -def parse_ipv6_literal_host(entity, default_port): +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> tuple[str, Optional[Union[str, int]]]: """Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where @@ -78,17 +109,17 @@ def parse_ipv6_literal_host(entity, default_port): - `default_port`: The port number to use when one wasn't specified in entity. """ - if entity.find(']') == -1: - raise ValueError("an IPv6 address literal must be " - "enclosed in '[' and ']' according " - "to RFC 2732.") - i = entity.find(']:') + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." + ) + i = entity.find("]:") if i == -1: return entity[1:-1], default_port - return entity[1: i], entity[i + 2:] + return entity[1:i], entity[i + 2 :] -def parse_host(entity, default_port=DEFAULT_PORT): +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -101,57 +132,63 @@ def parse_host(entity, default_port=DEFAULT_PORT): specified in entity. """ host = entity - port = default_port - if entity[0] == '[': + port: Optional[Union[str, int]] = default_port + if entity[0] == "[": host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): return entity, default_port - elif entity.find(':') != -1: - if entity.count(':') > 1: - raise ValueError("Reserved characters such as ':' must be " - "escaped according RFC 2396. An IPv6 " - "address literal must be enclosed in '[' " - "and ']' according to RFC 2732.") - host, port = host.split(':', 1) - if isinstance(port, string_type): + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." 
+ ) + host, port = host.split(":", 1) + if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %s" - % (port,)) + raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the ismaster response. + # "FOO.com" is in the hello response. return host.lower(), port -_IMPLICIT_TLSINSECURE_OPTS = {"tlsallowinvalidcertificates", - "tlsallowinvalidhostnames"} - -_TLSINSECURE_EXCLUDE_OPTS = (_IMPLICIT_TLSINSECURE_OPTS | - {INTERNAL_URI_OPTION_NAME_MAP[k] for k in - _IMPLICIT_TLSINSECURE_OPTS}) +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", +} -def _parse_options(opts, delim): +def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ - readpreferencetags portion, and the use of a unicode options string.""" + readpreferencetags portion, and the use of a unicode options string. + """ options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") - if key.lower() == 'readpreferencetags': + if key.lower() == "readpreferencetags": options.setdefault(key, []).append(value) else: if key in options: - warnings.warn("Duplicate URI option '%s'." % (key,)) - options[key] = unquote_plus(value) + warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) + if key.lower() == "authmechanismproperties": + val = value + else: + val = unquote_plus(value) + options[key] = val return options -def _handle_security_options(options): +def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: """Raise appropriate errors when conflicting TLS options are present in the options dictionary. @@ -159,32 +196,53 @@ def _handle_security_options(options): - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ - tlsinsecure = options.get('tlsinsecure') + # Implicitly defined options must not be explicitly specified. + tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: - for opt in _TLSINSECURE_EXCLUDE_OPTS: + for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: - err_msg = ("URI options %s and %s cannot be specified " - "simultaneously.") - raise InvalidURI(err_msg % ( - options.cased_key('tlsinsecure'), options.cased_key(opt))) - - if 'ssl' in options and 'tls' in options: - def truth_value(val): - if val in ('true', 'false'): - return val == 'true' + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") + if tlsallowinvalidcerts is not None: + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." 
+ raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) + if tlsallowinvalidcerts is True: + options["tlsdisableocspendpointcheck"] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = options.get("tlscrlfile") + if tlscrlfile is not None: + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): + if options.get(opt) is True: + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." + raise InvalidURI(err_msg % (opt,)) + + if "ssl" in options and "tls" in options: + + def truth_value(val: Any) -> Any: + if val in ("true", "false"): + return val == "true" if isinstance(val, bool): return val return val - if truth_value(options.get('ssl')) != truth_value(options.get('tls')): - err_msg = ("Can not specify conflicting values for URI options %s " - "and %s.") - raise InvalidURI(err_msg % ( - options.cased_key('ssl'), options.cased_key('tls'))) + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options -def _handle_option_deprecations(options): +def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: """Issue appropriate warnings when deprecated options are present in the options dictionary. Removes deprecated option key, value pairs if the options dictionary is found to also have the renamed option. @@ -196,44 +254,48 @@ def _handle_option_deprecations(options): for optname in list(options): if optname in URI_OPTIONS_DEPRECATION_MAP: mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] - if mode == 'renamed': + if mode == "renamed": newoptname = message if newoptname in options: - warn_msg = ("Deprecated option '%s' ignored in favor of " - "'%s'.") + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." warnings.warn( - warn_msg % (options.cased_key(optname), - options.cased_key(newoptname)), - DeprecationWarning, stacklevel=2) + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) options.pop(optname) continue warn_msg = "Option '%s' is deprecated, use '%s' instead." warnings.warn( warn_msg % (options.cased_key(optname), newoptname), - DeprecationWarning, stacklevel=2) - elif mode == 'removed': + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": warn_msg = "Option '%s' is deprecated. %s." warnings.warn( warn_msg % (options.cased_key(optname), message), - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) return options -def _normalize_options(options): +def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: """Normalizes option names in the options dictionary by converting them to - their internally-used names. Also handles use of the tlsInsecure option. + their internally-used names. :Parameters: - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ - tlsinsecure = options.get('tlsinsecure') + # Expand the tlsInsecure option. + tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: - intname = INTERNAL_URI_OPTION_NAME_MAP.get(opt, None) - # Internal options are logical inverse of public options. 
- options[intname] = not tlsinsecure + # Implicit options are logically the same as tlsInsecure. + options[opt] = tlsinsecure for optname in list(options): intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) @@ -243,7 +305,7 @@ def _normalize_options(options): return options -def validate_options(opts, warn=False): +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: """Validates and normalizes options passed in a MongoDB URI. Returns a new dictionary of validated and normalized options. If warn is @@ -259,7 +321,9 @@ def validate_options(opts, warn=False): return get_validated_options(opts, warn) -def split_options(opts, validate=True, warn=False, normalize=True): +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. @@ -286,22 +350,24 @@ def split_options(opts, validate=True, warn=False, normalize=True): else: raise ValueError except ValueError: - raise InvalidURI("MongoDB URI options are key=value pairs.") + raise InvalidURI("MongoDB URI options are key=value pairs.") from None options = _handle_security_options(options) options = _handle_option_deprecations(options) - if validate: - options = validate_options(options, warn) - if normalize: options = _normalize_options(options) + if validate: + options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") + return options -def split_hosts(hosts, default_port=DEFAULT_PORT): +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. @@ -315,13 +381,12 @@ def split_hosts(hosts, default_port=DEFAULT_PORT): for a host. """ nodes = [] - for entity in hosts.split(','): + for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host " - "(or extra comma in host list).") + raise ConfigurationError("Empty host (or extra comma in host list).") port = default_port # Unix socket entities don't have ports - if entity.endswith('.sock'): + if entity.endswith(".sock"): port = None nodes.append(parse_host(entity, port)) return nodes @@ -329,14 +394,37 @@ def split_hosts(hosts, default_port=DEFAULT_PORT): # Prohibited characters in database name. DB names also can't have ".", but for # backward-compat we allow "db.collection" in URI. -_BAD_DB_CHARS = re.compile('[' + re.escape(r'/ "$') + ']') +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") _ALLOWED_TXT_OPTS = frozenset( - ['authsource', 'authSource', 'replicaset', 'replicaSet']) - - -def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, - normalize=True, connect_timeout=None): + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds. 
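# Sketch of the tlsInsecure expansion performed by _normalize_options above:
# under the new semantics the implicit options take the same value as
# tlsInsecure itself, not the inverse used by the old internal names.
IMPLICIT_OPTS = ("tlsallowinvalidcertificates",
                 "tlsallowinvalidhostnames",
                 "tlsdisableocspendpointcheck")

def expand_tls_insecure(options: dict) -> dict:
    tlsinsecure = options.get("tlsinsecure")
    if tlsinsecure is not None:
        for opt in IMPLICIT_OPTS:
            options[opt] = tlsinsecure
    return options

opts = expand_tls_insecure({"tlsinsecure": True})
assert opts["tlsallowinvalidhostnames"] is True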
+ if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: """Parse and validate a MongoDB URI. Returns a dict of the form:: @@ -368,6 +456,15 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, to their internally-used names. Default: ``True``. - `connect_timeout` (optional): The maximum time in milliseconds to wait for a response from the DNS server. + - `srv_service_name` (optional): A custom SRV service name + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. .. versionchanged:: 3.9 Added the ``normalize`` parameter. @@ -387,13 +484,17 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, scheme_free = uri[SCHEME_LEN:] elif uri.startswith(SRV_SCHEME): if not _HAVE_DNSPYTHON: - raise ConfigurationError('The "dnspython" module must be ' - 'installed to use mongodb+srv:// URIs') + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: - raise InvalidURI("Invalid URI scheme: URI must " - "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME)) + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") @@ -404,94 +505,135 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, collection = None options = _CaseInsensitiveDictionary() - host_part, _, path_part = scheme_free.partition('/') + host_part, _, path_part = scheme_free.partition("/") if not host_part: host_part = path_part path_part = "" - if not path_part and '?' in host_part: - raise InvalidURI("A '/' is required between " - "the host list and any options.") - if path_part: - if path_part[0] == '?': - opts = unquote_plus(path_part[1:]) - else: - dbase, _, opts = map(unquote_plus, path_part.partition('?')) - if '.' in dbase: - dbase, collection = dbase.split('.', 1) - - if _BAD_DB_CHARS.search(dbase): - raise InvalidURI('Bad database name "%s"' % dbase) - - if opts: - options.update(split_options(opts, validate, warn, normalize)) + dbase, _, opts = path_part.partition("?") + else: + # There was no slash in scheme_free, check for a sole "?". 
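# Usage sketch for _check_options above, with illustrative hosts: multiple
# seeds are incompatible with directConnection=true.
nodes = [("h1.example.net", 27017), ("h2.example.net", 27017)]
raised = False
try:
    _check_options(nodes, {"directconnection": True})
except ConfigurationError:
    raised = True
assert raised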
+ host_part, _, opts = host_part.partition("?") - if dbase is not None: + if dbase: dbase = unquote_plus(dbase) - if collection is not None: - collection = unquote_plus(collection) - - if '@' in host_part: - userinfo, _, hosts = host_part.rpartition('@') + if "." in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") user, passwd = parse_userinfo(userinfo) else: hosts = host_part - if '/' in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be" - " percent-encoded: %s" % host_part) + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) hosts = unquote_plus(hosts) fqdn = None - + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI( - "%s URIs must include one, " - "and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") fqdn, port = nodes[0] if port is not None: - raise InvalidURI( - "%s URIs must not include a port number" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. 
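# Usage sketch: parse_uri returns a plain dict whose shape is documented in
# the docstring above. The URI below is illustrative; no network access is
# needed for non-SRV schemes.
parsed = parse_uri("mongodb://user:secret@localhost:27017/testdb.coll?replicaSet=rs0")
assert parsed["nodelist"] == [("localhost", 27017)]
assert (parsed["database"], parsed["collection"]) == ("testdb", "coll")
assert parsed["options"]["replicaSet"] == "rs0"  # options dict is case-insensitive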
connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout=connect_timeout) + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: - parsed_dns_options = split_options( - dns_options, validate, warn, normalize) + parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource and replicaSet are supported from DNS") + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val - if "ssl" not in options: - options["ssl"] = True if validate else 'true' + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) else: nodes = split_hosts(hosts, default_port=default_port) + _check_options(nodes, options) + return { - 'nodelist': nodes, - 'username': user, - 'password': passwd, - 'database': dbase, - 'collection': collection, - 'options': options, - 'fqdn': fqdn + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, } -if __name__ == '__main__': +def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +if __name__ == "__main__": import pprint - import sys + try: - pprint.pprint(parse_uri(sys.argv[1])) + pprint.pprint(parse_uri(sys.argv[1])) # noqa: T203 except InvalidURI as exc: - print(exc) - sys.exit(0) \ No newline at end of file + print(exc) # noqa: T201 + sys.exit(0) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 14ad63de88..ab6629fbbc 100644 --- a/pymongo/write_concern.py +++ 
b/pymongo/write_concern.py @@ -13,12 +13,22 @@ # limitations under the License. """Tools for working with write concerns.""" +from __future__ import annotations + +from typing import Any, Optional, Union -from bson.py3compat import integer_types, string_type from pymongo.errors import ConfigurationError -class WriteConcern(object): +# Moved here to avoid a circular import. +def validate_boolean(option: str, value: Any) -> bool: + """Validates that 'value' is True or False.""" + if isinstance(value, bool): + return value + raise TypeError(f"{option} must be True or False, was: {option}={value}") + + +class WriteConcern: """WriteConcern :Parameters: @@ -34,11 +44,9 @@ class WriteConcern(object): to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. + to the journal. Cannot be used in combination with `fsync`. Write + operations will fail with an exception if this option is used when + the server is running without journaling. - `fsync`: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` @@ -48,51 +56,54 @@ class WriteConcern(object): __slots__ = ("__document", "__acknowledged", "__server_default") - def __init__(self, w=None, wtimeout=None, j=None, fsync=None): - self.__document = {} + def __init__( + self, + w: Optional[Union[int, str]] = None, + wtimeout: Optional[int] = None, + j: Optional[bool] = None, + fsync: Optional[bool] = None, + ) -> None: + self.__document: dict[str, Any] = {} self.__acknowledged = True if wtimeout is not None: - if not isinstance(wtimeout, integer_types): + if not isinstance(wtimeout, int): raise TypeError("wtimeout must be an integer") if wtimeout < 0: raise ValueError("wtimeout cannot be less than 0") self.__document["wtimeout"] = wtimeout if j is not None: - if not isinstance(j, bool): - raise TypeError("j must be True or False") + validate_boolean("j", j) self.__document["j"] = j if fsync is not None: - if not isinstance(fsync, bool): - raise TypeError("fsync must be True or False") + validate_boolean("fsync", fsync) if j and fsync: - raise ConfigurationError("Can't set both j " - "and fsync at the same time") + raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync if w == 0 and j is True: raise ConfigurationError("Cannot set w to 0 and j to True") if w is not None: - if isinstance(w, integer_types): + if isinstance(w, int): if w < 0: raise ValueError("w cannot be less than 0") self.__acknowledged = w > 0 - elif not isinstance(w, string_type): + elif not isinstance(w, str): raise TypeError("w must be an integer or string") self.__document["w"] = w self.__server_default = not self.__document @property - def is_server_default(self): + def is_server_default(self) -> bool: """Does this WriteConcern match the server default.""" return self.__server_default @property - def document(self): + def document(self) -> dict[str, Any]: """The document representation of this write concern. .. 
note:: @@ -102,22 +113,23 @@ def document(self): return self.__document.copy() @property - def acknowledged(self): + def acknowledged(self) -> bool: """If ``True`` write operations will wait for acknowledgement before returning. """ return self.__acknowledged - def __repr__(self): - return ("WriteConcern(%s)" % ( - ", ".join("%s=%s" % kvt for kvt in self.__document.items()),)) + def __repr__(self) -> str: + return "WriteConcern({})".format( + ", ".join("{}={}".format(*kvt) for kvt in self.__document.items()) + ) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document != other.document return NotImplemented diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..78925e6024 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,187 @@ +[build-system] +requires = ["setuptools>=63.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "pymongo" +dynamic = ["version"] +description = "Python driver for MongoDB " +readme = "README.rst" +license = {file="LICENSE"} +requires-python = ">=3.7" +authors = [ + { name = "The MongoDB Python Team" }, +] +keywords = [ + "bson", + "gridfs", + "mongo", + "mongodb", + "pymongo", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Database", + "Typing :: Typed", +] +dependencies = [ + "dnspython>=1.16.0,<3.0.0", +] + +[project.optional-dependencies] +aws = [ + "pymongo-auth-aws<2.0.0", +] +encryption = [ + "pymongo[aws]", + "pymongocrypt>=1.6.0,<2.0.0", + "certifi;os.name=='nt' or sys_platform=='darwin'", +] +gssapi = [ + "pykerberos;os.name!='nt'", + "winkerberos>=0.5.0;os.name=='nt'" +] +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS. +# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +ocsp = [ + "certifi;os.name=='nt' or sys_platform=='darwin'", + "pyopenssl>=17.2.0", + "requests<3.0.0", + "cryptography>=2.5", + "service_identity>=18.1.0", +] +snappy = [ + "python-snappy", +] +# PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. +srv = [] +tls = [] +# PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. 
+zstd = [
+    "zstandard",
+]
+test = ["pytest>=7"]
+
+[project.urls]
+Homepage = "http://github.com/mongodb/mongo-python-driver"
+
+[tool.setuptools.dynamic]
+version = {attr = "pymongo._version.__version__"}
+
+[tool.setuptools.packages.find]
+include = ["bson", "gridfs", "pymongo"]
+
+[tool.setuptools.package-data]
+bson = ["py.typed", "*.pyi"]
+pymongo = ["py.typed", "*.pyi"]
+gridfs = ["py.typed", "*.pyi"]
+
+[tool.ruff]
+target-version = "py37"
+line-length = 100
+select = [
+    "E", "F", "W", # flake8
+    "B", # flake8-bugbear
+    "I", # isort
+    "ARG", # flake8-unused-arguments
+    "C4", # flake8-comprehensions
+    "EM", # flake8-errmsg
+    "ICN", # flake8-import-conventions
+    "ISC", # flake8-implicit-str-concat
+    "G", # flake8-logging-format
+    "PGH", # pygrep-hooks
+    "PIE", # flake8-pie
+    "PL", # pylint
+    "PT", # flake8-pytest-style
+    "PTH", # flake8-use-pathlib
+    "RET", # flake8-return
+    "RUF", # Ruff-specific
+    "S", # flake8-bandit
+    "SIM", # flake8-simplify
+    "T20", # flake8-print
+    "UP", # pyupgrade
+    "YTT", # flake8-2020
+    "EXE", # flake8-executable
+]
+extend-ignore = [
+    "PLR", # Design related pylint codes
+    "E501", # Line too long
+    "PT004", # Use underscore for non-returning fixture (use usefixture instead)
+    "UP007", # Use `X | Y` for type annotation
+    "EM101", # Exception must not use a string literal, assign to variable first
+    "EM102", # Exception must not use an f-string literal, assign to variable first
+    "G004", # Logging statement uses f-string
+    "UP006", # Use `type` instead of `Type` for type annotation
+    "RET505", # Unnecessary `elif` after `return` statement
+    "RET506", # Unnecessary `elif` after `raise` statement
+    "SIM108", # Use ternary operator
+    "PTH123", # `open()` should be replaced by `Path.open()`
+    "SIM102", # Use a single `if` statement instead of nested `if` statements
+    "SIM105", # Use `contextlib.suppress(OSError)` instead of `try`-`except`-`pass`
+    "ARG002", # Unused method argument:
+    "S101", # Use of `assert` detected
+    "SIM114", # Combine `if` branches using logical `or` operator
+    "PGH003", # Use specific rule codes when ignoring type issues
+    "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar`
+    "EM103", # Exception must not use a `.format()` string directly, assign to variable first
+    "C408", # Unnecessary `dict` call (rewrite as a literal)
+    "SIM117", # Use a single `with` statement with multiple contexts instead of nested `with` statements
+]
+unfixable = [
+    "RUF100", # Unused noqa
+    "T20", # Removes print statements
+    "F841", # Removes unused variables
+]
+exclude = []
+flake8-unused-arguments.ignore-variadic-names = true
+isort.required-imports = ["from __future__ import annotations"]
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?)|dummy.*)$"
+
+[tool.ruff.per-file-ignores]
+"pymongo/__init__.py" = ["E402"]
+"test/*.py" = ["PT", "E402", "PLW", "SIM", "E741", "PTH", "S", "B904", "E722", "T201",
+    "RET", "ARG", "F405", "B028", "PGH001", "B018", "F403", "RUF015", "E731", "B007",
+    "UP031", "F401", "B023", "F811"]
+"green_framework_test.py" = ["T201"]
+
+[tool.coverage.run]
+branch = true
+source = ["pymongo", "bson", "gridfs"]
+relative_files = true
+
+[tool.coverage.report]
+exclude_lines = [
+    "if (.*and +)*_use_c( and.*)*:",
+    "def has_c",
+    "def get_version_string",
+    "^except AttributeError:",
+    "except ImportError:",
+    "raise NotImplementedError",
+    "return NotImplemented",
+    "_use_c = True",
+    "if __name__ == '__main__':",
+    ]
+partial_branches = ["if (.*and +)*not _use_c( and.*)*:"]
+
+[tool.coverage.html]
+directory = "htmlcov" diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..e10d24558a --- /dev/null +++ b/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +testpaths = + test +norecursedirs = test/* +addopts = -ra --junitxml=xunit-results/TEST-results.xml +faulthandler_timeout = 1500 diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index 7c0110fae8..a711e246bf --- a/setup.py +++ b/setup.py @@ -1,235 +1,18 @@ +from __future__ import annotations + import os -import platform -import re import sys import warnings - -if sys.version_info[:2] < (2, 7): - raise RuntimeError("Python version >= 2.7 required.") - - # Hack to silence atexit traceback in some Python versions try: - import multiprocessing + import multiprocessing # noqa: F401 except ImportError: pass -# Don't force people to install setuptools unless -# we have to. -try: - from setuptools import setup -except ImportError: - from ez_setup import use_setuptools - use_setuptools() - from setuptools import setup - -from distutils.cmd import Command -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError, DistutilsOptionError -from distutils.errors import DistutilsPlatformError, DistutilsExecError -from distutils.core import Extension - -_HAVE_SPHINX = True -try: - from sphinx.cmd import build as sphinx -except ImportError: - try: - import sphinx - except ImportError: - _HAVE_SPHINX = False - -version = "3.10.1" - -f = open("README.rst") -try: - try: - readme_content = f.read() - except: - readme_content = "" -finally: - f.close() - -# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple -# ships are built with it. This is a problem starting with Xcode 5.1 -# since clang 3.4 errors out when it encounters unrecognized compiler -# flags. This hack removes -mno-fused-madd from the CFLAGS automatically -# generated by distutils for Apple provided pythons, allowing C extension -# builds to complete without error. The inspiration comes from older -# versions of distutils.sysconfig.get_config_vars. -if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower(): - from distutils.sysconfig import get_config_vars - res = get_config_vars() - for key in ('CFLAGS', 'PY_CFLAGS'): - if key in res: - flags = res[key] - flags = re.sub('-mno-fused-madd', '', flags) - res[key] = flags - - -class test(Command): - description = "run the tests" - - user_options = [ - ("test-module=", "m", "Discover tests in specified module"), - ("test-suite=", "s", - "Test suite to run (e.g. 
'some_module.test_suite')"), - ("failfast", "f", "Stop running tests on first failure or error"), - ("xunit-output=", "x", - "Generate a results directory with XUnit XML format") - ] - - def initialize_options(self): - self.test_module = None - self.test_suite = None - self.failfast = False - self.xunit_output = None - - def finalize_options(self): - if self.test_suite is None and self.test_module is None: - self.test_module = 'test' - elif self.test_module is not None and self.test_suite is not None: - raise DistutilsOptionError( - "You may specify a module or suite, but not both" - ) - - def run(self): - # Installing required packages, running egg_info and build_ext are - # part of normal operation for setuptools.command.test.test - if self.distribution.install_requires: - self.distribution.fetch_build_eggs( - self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs(self.distribution.tests_require) - if self.xunit_output: - self.distribution.fetch_build_eggs(["unittest-xml-reporting"]) - self.run_command('egg_info') - build_ext_cmd = self.reinitialize_command('build_ext') - build_ext_cmd.inplace = 1 - self.run_command('build_ext') - - # Construct a TextTestRunner directly from the unittest imported from - # test, which creates a TestResult that supports the 'addSkip' method. - # setuptools will by default create a TextTestRunner that uses the old - # TestResult class. - from test import unittest, PymongoTestRunner, test_cases - if self.test_suite is None: - all_tests = unittest.defaultTestLoader.discover(self.test_module) - suite = unittest.TestSuite() - suite.addTests(sorted(test_cases(all_tests), - key=lambda x: x.__module__)) - else: - suite = unittest.defaultTestLoader.loadTestsFromName( - self.test_suite) - if self.xunit_output: - from xmlrunner import XMLTestRunner - runner = XMLTestRunner(verbosity=2, failfast=self.failfast, - output=self.xunit_output) - else: - runner = PymongoTestRunner(verbosity=2, failfast=self.failfast) - result = runner.run(suite) - sys.exit(not result.wasSuccessful()) - - -class doc(Command): - - description = "generate or test documentation" - - user_options = [("test", "t", - "run doctests instead of generating documentation")] - - boolean_options = ["test"] - - def initialize_options(self): - self.test = False - - def finalize_options(self): - pass - - def run(self): - - if not _HAVE_SPHINX: - raise RuntimeError( - "You must install Sphinx to build or test the documentation.") - - if sys.version_info[0] >= 3: - import doctest - from doctest import OutputChecker as _OutputChecker - - # Match u or U (possibly followed by r or R), removing it. - # r/R can follow u/U but not precede it. Don't match the - # single character string 'u' or 'U'. 
- _u_literal_re = re.compile( - r"(\W|^)(?=1.16.0,<1.17.0"]}) -else: - extras_require.update({'tls': []}) - extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]}) -if sys.platform == 'win32': - extras_require['gssapi'] = ["winkerberos>=0.5.0"] - if vi < (2, 7, 9): - extras_require['tls'].append("wincertstore>=0.2") -else: - extras_require['gssapi'] = ["pykerberos"] - if vi < (2, 7, 9): - extras_require['tls'].append("certifi") + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension module" % (name,), # noqa: UP031 + "The output above this warning shows how the compilation failed.", + ), + stacklevel=2, + ) -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"] -} -if "--no_ext" in sys.argv: - sys.argv.remove("--no_ext") -elif (sys.platform.startswith("java") or - sys.platform == "cli" or - "PyPy" in sys.version): - sys.stdout.write(""" +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], + ), +] + + +if "--no_ext" in sys.argv or os.environ.get("NO_EXT"): + try: + sys.argv.remove("--no_ext") + except ValueError: + pass + ext_modules = [] +elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: + sys.stdout.write( + """ *****************************************************\n The optional C extensions are currently not supported\n by this python implementation.\n *****************************************************\n -""") -else: - extra_opts['ext_modules'] = ext_modules +""" + ) + ext_modules = [] -setup( - name="pymongo", - version=version, - description="Python driver for MongoDB ", - long_description=readme_content, - author="Mike Dirolf", - author_email="mongodb-user@googlegroups.com", - maintainer="Bernie Hackett", - maintainer_email="bernie@mongodb.com", - url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=[], - license="Apache License, Version 2.0", - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database"], - cmdclass={"build_ext": custom_build_ext, - "doc": doc, - "test": test}, - extras_require=extras_require, - **extra_opts -) +setup(cmdclass={"build_ext": custom_build_ext}, ext_modules=ext_modules) # type:ignore diff --git a/test/__init__.py b/test/__init__.py index 6f585f27eb..cea27c01f7 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -12,71 +12,118 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Test suite for pymongo, bson, and gridfs. -""" +"""Test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations +import base64 +import gc +import multiprocessing import os +import signal import socket +import subprocess import sys import threading import time +import traceback import unittest import warnings try: import ipaddress + HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False - from contextlib import contextmanager from functools import wraps +from test.version import Version +from typing import Any, Callable, Dict, Generator, no_type_check from unittest import SkipTest +from urllib.parse import quote_plus import pymongo import pymongo.errors - from bson.son import SON from pymongo import common, message from pymongo.common import partition_node -from pymongo.ssl_support import HAVE_SSL, validate_cert_reqs -from test.version import Version +from pymongo.database import Database +from pymongo.hello import HelloCompat +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl +from pymongo.uri_parser import parse_uri if HAVE_SSL: import ssl -try: - # Enable the fault handler to dump the traceback of each running thread - # after a segfault. - import faulthandler - faulthandler.enable() -except ImportError: - pass +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, "set_debug"): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) # The host and port of a single mongod or mongos, or the seed host # for a replica set. -host = os.environ.get("DB_IP", 'localhost') +host = os.environ.get("DB_IP", "localhost") port = int(os.environ.get("DB_PORT", 27017)) db_user = os.environ.get("DB_USER", "user") db_pwd = os.environ.get("DB_PASSWORD", "password") -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.environ.get('CLIENT_PEM', - os.path.join(CERT_PATH, 'client.pem')) -CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) -CERT_REQS = validate_cert_reqs('CERT_REQS', os.environ.get('CERT_REQS')) +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) -_SSL_OPTIONS = dict(ssl=True) +TLS_OPTIONS: Dict = {"tls": True} if CLIENT_PEM: - _SSL_OPTIONS['ssl_certfile'] = CLIENT_PEM + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM if CA_PEM: - _SSL_OPTIONS['ssl_ca_certs'] = CA_PEM -if CERT_REQS is not None: - _SSL_OPTIONS['ssl_cert_reqs'] = CERT_REQS + TLS_OPTIONS["tlsCAFile"] = CA_PEM COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") + +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd +elif TEST_SERVERLESS: + TEST_LOADBALANCER = True + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + TLS_OPTIONS = {"tls": True} 
+ # Spec says serverless tests must be run with compression. + COMPRESSORS = COMPRESSORS or "zlib" + + +# Shared KMS data. +LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} + def is_server_resolvable(): """Returns True if 'server' is resolvable.""" @@ -84,31 +131,32 @@ def is_server_resolvable(): socket.setdefaulttimeout(1) try: try: - socket.gethostbyname('server') + socket.gethostbyname("server") return True - except socket.error: + except OSError: return False finally: socket.setdefaulttimeout(socket_timeout) def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([('createUser', user)]) + cmd = SON([("createUser", user)]) # X509 doesn't use a password if pwd: - cmd['pwd'] = pwd - cmd['roles'] = roles or ['root'] + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] cmd.update(**kwargs) return authdb.command(cmd) -class client_knobs(object): +class client_knobs: def __init__( - self, - heartbeat_frequency=None, - min_heartbeat_interval=None, - kill_cursor_frequency=None, - events_queue_frequency=None): + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): self.heartbeat_frequency = heartbeat_frequency self.min_heartbeat_interval = min_heartbeat_interval self.kill_cursor_frequency = kill_cursor_frequency @@ -118,6 +166,8 @@ def __init__( self.old_min_heartbeat_interval = None self.old_kill_cursor_frequency = None self.old_events_queue_frequency = None + self._enabled = False + self._stack = None def enable(self): self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY @@ -136,25 +186,60 @@ def enable(self): if self.events_queue_frequency is not None: common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = "".join(traceback.format_stack()) def __enter__(self): self.enable() + @no_type_check def disable(self): common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False def __exit__(self, exc_type, exc_val, exc_tb): self.disable() + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + def wrap(*args, **kwargs): + with self: + return f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! 
HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + def _all_users(db): - return set(u['user'] for u in db.command('usersInfo').get('users', [])) + return {u["user"] for u in db.command("usersInfo").get("users", [])} + +class ClientContext: + client: MongoClient -class ClientContext(object): + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI def __init__(self): """Create a client and grab essential information from the server.""" @@ -168,104 +253,161 @@ def __init__(self): self.version = Version(-1) # Needs to be comparable with Version self.auth_enabled = False self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None self.is_mongos = False self.mongoses = [] self.is_rs = False self.has_ipv6 = False - self.ssl = False - self.ssl_cert_none = False - self.ssl_certfile = False + self.tls = False + self.tlsCertificateKeyFile = False self.server_is_resolvable = is_server_resolvable() - self.default_client_options = {} + self.default_client_options: Dict = {} self.sessions_enabled = False - self.client = None + self.client = None # type: ignore self.conn_lock = threading.Lock() - + self.is_data_lake = False + self.load_balancer = TEST_LOADBALANCER + self.serverless = TEST_SERVERLESS + if self.load_balancer or self.serverless: + self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api @property - def ismaster(self): - return self.client.admin.command('isMaster') + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + if client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + def uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + return f"mongodb://{auth_part}{self.pair}/?{opts_part}" + + @property + def hello(self): + if not self._hello: + if self.serverless or self.load_balancer: + self._hello = self.client.admin.command(HelloCompat.CMD) + else: + self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello def _connect(self, host, port, **kwargs): - # Jython takes a long time to connect. 
- if sys.platform.startswith('java'): - timeout_ms = 10000 - else: - timeout_ms = 5000 - if COMPRESSORS: - kwargs["compressors"] = COMPRESSORS - client = pymongo.MongoClient( - host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) + kwargs.update(self.default_client_options) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) try: try: - client.admin.command('isMaster') # Can we connect? + client.admin.command("ping") # Can we connect? except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - 'connected client %r, but isMaster failed: %s' % ( - client, exc)) + f"connected client {client!r}, but legacy hello failed: {exc}" + ) else: - self.connection_attempts.append( - 'successfully connected client %r' % (client,)) + self.connection_attempts.append(f"successfully connected client {client!r}") # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append( - 'failed to connect client %r: %s' % (client, exc)) + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") return None + finally: + client.close() def _init_client(self): self.client = self._connect(host, port) + if self.client is not None: + # Return early when connected to dataLake as mongohoused does not + # support the getCmdLineOpts command and is tested without TLS. + build_info: Any = self.client.admin.command("buildInfo") + if "dataLake" in build_info: + self.is_data_lake = True + self.auth_enabled = True + self.client = self._connect(host, port, username=db_user, password=db_pwd) + self.connected = True + return + if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? - self.client = self._connect(host, port, **_SSL_OPTIONS) + self.client = self._connect(host, port, **TLS_OPTIONS) if self.client: - self.ssl = True - self.default_client_options.update(_SSL_OPTIONS) - self.ssl_certfile = True - if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE: - self.ssl_cert_none = True + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True if self.client: self.connected = True - try: - self.cmd_line = self.client.admin.command('getCmdLineOpts') - except pymongo.errors.OperationFailure as e: - msg = e.details.get('errmsg', '') - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: - # Unauthorized. - self.auth_enabled = True - else: - raise + if self.serverless: + self.auth_enabled = True else: - self.auth_enabled = self._server_started_with_auth() + try: + self.cmd_line = self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - # See if db_user already exists. - if not self._check_user_provided(): - _create_user(self.client.admin, db_user, db_pwd) + if not self.serverless: + # See if db_user already exists. 
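+                # Creating a user that already exists would raise OperationFailure.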
+ if not self._check_user_provided(): + _create_user(self.client.admin, db_user, db_pwd) self.client = self._connect( - host, port, username=db_user, password=db_pwd, + host, + port, + username=db_user, + password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options, + ) # May not have this if OperationFailure was raised earlier. - self.cmd_line = self.client.admin.command('getCmdLineOpts') + self.cmd_line = self.client.admin.command("getCmdLineOpts") - self.server_status = self.client.admin.command('serverStatus') - if self.storage_engine == "mmapv1": - # MMAPv1 does not support retryWrites=True. - self.default_client_options['retryWrites'] = False + if self.serverless: + self.server_status = {} + else: + self.server_status = self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False - ismaster = self.ismaster - self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster + hello = self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello - if 'setName' in ismaster: - self.replica_set_name = str(ismaster['setName']) + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) self.is_rs = True if self.auth_enabled: # It doesn't matter which member we use as the seed here. @@ -275,52 +417,60 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options, + ) else: self.client = pymongo.MongoClient( - host, - port, - replicaSet=self.replica_set_name, - **self.default_client_options) - - # Get the authoritative ismaster result from the primary. - ismaster = self.ismaster - nodes = [partition_node(node.lower()) - for node in ismaster.get('hosts', [])] - nodes.extend([partition_node(node.lower()) - for node in ismaster.get('passives', [])]) - nodes.extend([partition_node(node.lower()) - for node in ismaster.get('arbiters', [])]) + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. 
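+            # Drop the cached hello response so the property re-runs the command.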
+ self._hello = None + hello = self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: - self.nodes = set([(host, port)]) - self.w = len(ismaster.get("hosts", [])) or 1 + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) - if 'enableTestCommands=1' in self.cmd_line['argv']: + if self.serverless: + self.server_parameters = { + "requireApiVersion": False, + "enableTestCommands": True, + } self.test_commands_enabled = True - elif 'parsed' in self.cmd_line: - params = self.cmd_line['parsed'].get('setParameter', []) - if 'enableTestCommands=1' in params: + self.has_ipv6 = False + else: + self.server_parameters = self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if "enableTestCommands=1" in self.cmd_line["argv"]: self.test_commands_enabled = True - else: - params = self.cmd_line['parsed'].get('setParameter', {}) - if params.get('enableTestCommands') == '1': + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = self._server_started_with_ipv6() - self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid') - self.has_ipv6 = self._server_started_with_ipv6() + self.is_mongos = self.hello.get("msg") == "isdbgrid" if self.is_mongos: - # Check for another mongos on the next port. address = self.client.address - next_address = address[0], address[1] + 1 self.mongoses.append(address) - mongos_client = self._connect(*next_address, - **self.default_client_options) - if mongos_client: - ismaster = mongos_client.admin.command('ismaster') - if ismaster.get('msg') == 'isdbgrid': - self.mongoses.append(next_address) + if not self.serverless: + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) def init(self): with self.conn_lock: @@ -328,7 +478,7 @@ def init(self): self._init_client() def connection_attempt_info(self): - return '\n'.join(self.connection_attempts) + return "\n".join(self.connection_attempts) @property def host(self): @@ -357,57 +507,64 @@ def has_secondaries(self): @property def storage_engine(self): try: - return self.server_status.get("storageEngine", {}).get("name") + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) except AttributeError: # Raised if self.server_status is None. 
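+            # server_status is still None when we never connected to a server.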
return None def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" - client = pymongo.MongoClient( - host, port, + client: MongoClient = pymongo.MongoClient( + host, + port, username=db_user, password=db_pwd, - serverSelectionTimeoutMS=100, - **self.default_client_options) + **self.default_client_options, + ) try: return db_user in _all_users(client.admin) except pymongo.errors.OperationFailure as e: - msg = e.details.get('errmsg', '') - if e.code == 18 or 'auth fails' in msg: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: # Auth failed. return False else: raise + finally: + client.close() def _server_started_with_auth(self): # MongoDB >= 2.0 - if 'parsed' in self.cmd_line: - parsed = self.cmd_line['parsed'] + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return (security.get('auth', False) or - bool(security.get('keyFile'))) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = self.cmd_line['argv'] - return '--auth' in argv or '--keyFile' in argv + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv def _server_started_with_ipv6(self): if not socket.has_ipv6: return False - if 'parsed' in self.cmd_line: - if not self.cmd_line['parsed'].get('net', {}).get('ipv6'): + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): return False else: - if '--ipv6' not in self.cmd_line['argv']: + if "--ipv6" not in self.cmd_line["argv"]: return False # The server was started with --ipv6. Is there an IPv6 route to it? 
@@ -415,7 +572,7 @@ def _server_started_with_ipv6(self): for info in socket.getaddrinfo(self.host, self.port): if info[0] == socket.AF_INET6: return True - except socket.error: + except OSError: pass return False @@ -427,211 +584,314 @@ def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest( - "Cannot connect to MongoDB on %s" % (self.pair,)) + raise SkipTest(f"Cannot connect to MongoDB on {self.pair}") if condition(): return f(*args, **kwargs) raise SkipTest(msg) + return wrap if func is None: + def decorate(f): return make_wrapper(f) + return decorate return make_wrapper(func) def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): - kwargs['writeConcern'] = {'w': self.w} + kwargs["writeConcern"] = {"w": self.w} return _create_user(self.client[dbname], user, pwd, roles, **kwargs) def drop_user(self, dbname, user): - self.client[dbname].command( - 'dropUser', user, writeConcern={'w': self.w}) + self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected - "Cannot connect to MongoDB on %s" % (self.pair,), - func=func) + f"Cannot connect to MongoDB on {self.pair}", + func=func, + ) + + def require_data_lake(self, func): + """Run a test only if we are connected to Atlas Data Lake.""" + return self._require( + lambda: self.is_data_lake, + f"Not connected to Atlas Data Lake on {self.pair}", + func=func, + ) def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters. """ + run regardless of storage engine on sharded clusters. 
+ """ + def is_not_mmap(): if self.is_mongos: return True - return self.storage_engine != 'mmapv1' + return self.storage_engine != "mmapv1" - return self._require( - is_not_mmap, "Storage engine must not be MMAPv1", func=func) + return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version >= other_version, - "Server version must be at least %s" - % str(other_version)) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) def require_version_max(self, *ver): """Run a test only if the server version is at most ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version <= other_version, - "Server version must be at most %s" - % str(other_version)) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" - return self.check_auth_with_sharding( - self._require(lambda: self.auth_enabled, - "Authentication is not enabled on the server", - func=func)) + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" - return self._require(lambda: not self.auth_enabled, - "Authentication must not be enabled on the server", - func=func) + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) def require_replica_set(self, func): """Run a test only if the client is connected to a replica set.""" - return self._require(lambda: self.is_rs, - "Not connected to a replica set", - func=func) + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) def require_secondaries_count(self, count): """Run a test only if the client is connected to a replica set that has `count` secondaries. 
""" + def sec_count(): return 0 if not self.client else len(self.client.secondaries) - return self._require(lambda: sec_count() >= count, - "Not enough secondaries available") + + return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + + @property + def supports_secondary_read_pref(self): + if self.has_secondaries: + return True + if self.is_mongos: + shard = self.client.config.shards.find_one()["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) def require_no_replica_set(self, func): """Run a test if the client is *not* connected to a replica set.""" return self._require( - lambda: not self.is_rs, - "Connected to a replica set, not a standalone mongod", - func=func) + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) def require_ipv6(self, func): """Run a test only if the client can connect to a server via IPv6.""" - return self._require(lambda: self.has_ipv6, - "No IPv6", - func=func) + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) def require_no_mongos(self, func): """Run a test only if the client is not connected to a mongos.""" - return self._require(lambda: not self.is_mongos, - "Must be connected to a mongod, not a mongos", - func=func) + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) def require_mongos(self, func): """Run a test only if the client is connected to a mongos.""" - return self._require(lambda: self.is_mongos, - "Must be connected to a mongos", - func=func) + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster - that has 2 mongos nodes.""" - return self._require(lambda: len(self.mongoses) > 1, - "Must have multiple mongoses available", - func=func) + that has 2 mongos nodes. 
+ """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) def require_standalone(self, func): """Run a test only if the client is connected to a standalone.""" - return self._require(lambda: not (self.is_mongos or self.is_rs), - "Must be connected to a standalone", - func=func) + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) def require_no_standalone(self, func): """Run a test only if the client is not connected to a standalone.""" - return self._require(lambda: self.is_mongos or self.is_rs, - "Must be connected to a replica set or mongos", - func=func) - - def check_auth_with_sharding(self, func): - """Skip a test when connected to mongos < 2.0 and running with auth.""" - condition = lambda: not (self.auth_enabled and - self.is_mongos and self.version < (2,)) - return self._require(condition, - "Auth with sharding requires MongoDB >= 2.0.0", - func=func) + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_no_serverless(self, func): + """Run a test only if the client is not connected to serverless.""" + return self._require( + lambda: not self.serverless, "Must not be connected to serverless", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) def is_topology_type(self, topologies): - if 'single' in topologies and not (self.is_mongos or self.is_rs): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): return True - if 'replicaset' in topologies and self.is_rs: + if "replicaset" in topologies and self.is_rs: return True - if 'sharded' in topologies and self.is_mongos: + if "sharded" in topologies and self.is_mongos: + return True + if "sharded-replicaset" in topologies and self.is_mongos: + shards = list(client_context.client.config.shards.find()) + for shard in shards: + # For a 3-member RS-backed sharded cluster, shard['host'] + # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' + # Otherwise it will be 'ip1:port1' + host_spec = shard["host"] + if not len(host_spec.split("/")) > 1: + return False return True return False - def require_cluster_type(self, topologies=[]): + def require_cluster_type(self, topologies=None): """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies - are 'single', 'replicaset', and 'sharded'.""" + are 'single', 'replicaset', and 'sharded'. 
+ """ + topologies = topologies or [] + def _is_valid_topology(): return self.is_topology_type(topologies) - return self._require( - _is_valid_topology, - "Cluster type not in %s" % (topologies)) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) def require_test_commands(self, func): """Run a test only if the server has test commands enabled.""" - return self._require(lambda: self.test_commands_enabled, - "Test commands must be enabled", - func=func) + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail - point.""" - return self._require(lambda: self.supports_failCommand_fail_point, - "failCommand fail point must be supported", - func=func) - - def require_ssl(self, func): - """Run a test only if the client can connect over SSL.""" - return self._require(lambda: self.ssl, - "Must be able to connect via SSL", - func=func) - - def require_no_ssl(self, func): - """Run a test only if the client can connect over SSL.""" - return self._require(lambda: not self.ssl, - "Must be able to connect without SSL", - func=func) - - def require_ssl_cert_none(self, func): - """Run a test only if the client can connect with ssl.CERT_NONE.""" - return self._require(lambda: self.ssl_cert_none, - "Must be able to connect with ssl.CERT_NONE", - func=func) - - def require_ssl_certfile(self, func): - """Run a test only if the client can connect with ssl_certfile.""" - return self._require(lambda: self.ssl_certfile, - "Must be able to connect with ssl_certfile", - func=func) + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 + return self._require( + lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func, + ) + + def require_failCommand_blockConnection(self, func): + """Run a test only if the server supports failCommand blockConnection.""" + return self._require( + lambda: ( + self.test_commands_enabled + and ( + (not self.is_mongos and self.version >= (4, 2, 9)) + or (self.is_mongos and self.version >= (4, 4)) + ) + ), + "failCommand blockConnection is not supported", + func=func, + ) + + def require_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func) + + def require_no_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func) + + def require_tlsCertificateKeyFile(self, func): + """Run a test only if the client can connect with tlsCertificateKeyFile.""" + return self._require( + lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", + func=func, + ) def require_server_resolvable(self, func): """Run a test only if the hostname 'server' is resolvable.""" - return self._require(lambda: self.server_is_resolvable, - "No hosts entry for 'server'. Cannot validate " - "hostname in the certificate", - func=func) + return self._require( + lambda: self.server_is_resolvable, + "No hosts entry for 'server'. 
Cannot validate hostname in the certificate", + func=func, + ) def require_sessions(self, func): """Run a test only if the deployment supports sessions.""" - return self._require(lambda: self.sessions_enabled, - "Sessions not supported", - func=func) + return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) + + def supports_retryable_writes(self): + if self.storage_engine == "mmapv1": + return False + if not self.sessions_enabled: + return False + return self.is_mongos or self.is_rs + + def require_retryable_writes(self, func): + """Run a test only if the deployment supports retryable writes.""" + return self._require( + self.supports_retryable_writes, + "This server does not support retryable writes", + func=func, + ) def supports_transactions(self): - if self.storage_engine == 'mmapv1': + if self.storage_engine == "mmapv1": return False if self.version.at_least(4, 1, 8): @@ -647,33 +907,28 @@ def require_transactions(self, func): *Might* because this does not test the storage engine or FCV. """ - return self._require(self.supports_transactions, - "Transactions are not supported", - func=func) - - def mongos_seeds(self): - return ','.join('%s:%s' % address for address in self.mongoses) + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) - @property - def supports_reindex(self): - """Does the connected server support reindex?""" - return not (self.version.at_least(4, 1, 0) and self.is_mongos) + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) - @property - def supports_getpreverror(self): - """Does the connected server support getpreverror?""" - return not (self.version.at_least(4, 1, 0) or self.is_mongos) + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) @property def supports_failCommand_fail_point(self): """Does the server support the failCommand fail point?""" if self.is_mongos: - return (self.version.at_least(4, 1, 5) and - self.test_commands_enabled) + return self.version.at_least(4, 1, 5) and self.test_commands_enabled else: - return (self.version.at_least(4, 0) and - self.test_commands_enabled) - + return self.version.at_least(4, 0) and self.test_commands_enabled @property def requires_hint_with_min_max_queries(self): @@ -681,6 +936,14 @@ def requires_hint_with_min_max_queries(self): # Changed in SERVER-39567. return self.version.at_least(4, 1, 10) + @property + def max_bson_size(self): + return self.hello["maxBsonObjectSize"] + + @property + def max_write_batch_size(self): + return self.hello["maxWriteBatchSize"] + # Reusable client context client_context = ClientContext() @@ -688,10 +951,13 @@ def requires_hint_with_min_max_queries(self): def sanitize_cmd(cmd): cp = cmd.copy() - cp.pop('$clusterTime', None) - cp.pop('$db', None) - cp.pop('$readPreference', None) - cp.pop('lsid', None) + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) + if MONGODB_API_VERSION: + # Stable API parameters + cp.pop("apiVersion", None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. 
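+    # Dicts preserve insertion order, so the first key is the command name.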
name = next(iter(cp)) @@ -706,8 +972,8 @@ def sanitize_cmd(cmd): def sanitize_reply(reply): cp = reply.copy() - cp.pop('$clusterTime', None) - cp.pop('operationTime', None) + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) return cp @@ -718,35 +984,131 @@ def assertEqualCommand(self, expected, actual, msg=None): def assertEqualReply(self, expected, actual, msg=None): self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client_context.client.admin.command(cmd_on) + try: + yield + finally: + client_context.client.admin.command( + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
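+                    # (Process.kill() delivers SIGKILL on POSIX, which cannot be caught or ignored.)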
+                    proc.kill()
+                    proc.join(1)
+                self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?")
+            self.assertEqual(proc.exitcode, 0)
+
+
+def print_thread_tracebacks() -> None:
+    """Print all Python thread tracebacks."""
+    for thread_id, frame in sys._current_frames().items():
+        sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n")
+        traceback.print_stack(frame, file=sys.stderr)
+
+
+def print_thread_stacks(pid: int) -> None:
+    """Print all C-level thread stacks for a given process id."""
+    if sys.platform == "darwin":
+        cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"']
+    else:
+        cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"']
+
+    try:
+        res = subprocess.run(
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
+        )
+    except Exception as exc:
+        sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}")
+    else:
+        sys.stderr.write(res.stdout)


class IntegrationTest(PyMongoTestCase):
    """Base class for TestCases that need a connection to MongoDB to pass."""

+    client: MongoClient[dict]
+    db: Database
+    credentials: Dict[str, str]
+
    @classmethod
    @client_context.require_connection
    def setUpClass(cls):
+        if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False):
+            raise SkipTest("this test does not support load balancers")
+        if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False):
+            raise SkipTest("this test does not support serverless")
        cls.client = client_context.client
        cls.db = cls.client.pymongo_test
        if client_context.auth_enabled:
-            cls.credentials = {'username': db_user, 'password': db_pwd}
+            cls.credentials = {"username": db_user, "password": db_pwd}
        else:
            cls.credentials = {}

-    @contextmanager
-    def fail_point(self, command_args):
-        cmd_on = SON([('configureFailPoint', 'failCommand')])
-        cmd_on.update(command_args)
-        self.client.admin.command(cmd_on)
-        try:
-            yield
-        finally:
-            self.client.admin.command(
-                'configureFailPoint', cmd_on['configureFailPoint'], mode='off')
+    def cleanup_colls(self, *collections):
+        """Cleanup collections faster than drop_collection."""
+        for c in collections:
+            c = self.client[c.database.name][c.name]
+            c.delete_many({})
+            c.drop_indexes()

-# Use assertRaisesRegex if available, otherwise use Python 2.7's
-# deprecated assertRaisesRegexp, with a 'p'.
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
-    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+    def patch_system_certs(self, ca_certs):
+        patcher = SystemCertsPatcher(ca_certs)
+        self.addCleanup(patcher.disable)


class MockClientTest(unittest.TestCase):
@@ -758,44 +1120,108 @@ class MockClientTest(unittest.TestCase):

    The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests.
    """

+    # MockClient tests that use replicaSet, directConnection=True, pass
+    # multiple seed addresses, or wait for heartbeat events are incompatible
+    # with loadBalanced=True.
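+    # require_no_load_balancer on setUpClass below therefore skips the whole class.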
+    @classmethod
+    @client_context.require_no_load_balancer
+    def setUpClass(cls):
+        pass
+
    def setUp(self):
-        super(MockClientTest, self).setUp()
+        super().setUp()

-        self.client_knobs = client_knobs(
-            heartbeat_frequency=0.001,
-            min_heartbeat_interval=0.001)
+        self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001)

        self.client_knobs.enable()

    def tearDown(self):
        self.client_knobs.disable()
-        super(MockClientTest, self).tearDown()
+        super().tearDown()
+
+
+# Global knobs to speed up the test suite.
+global_knobs = client_knobs(events_queue_frequency=0.05)


def setup():
    client_context.init()
    warnings.resetwarnings()
    warnings.simplefilter("always")
+    global_knobs.enable()


-def teardown():
+def _get_executors(topology):
+    executors = []
+    for server in topology._servers.values():
+        # Some MockMonitors do not have an _executor.
+        if hasattr(server._monitor, "_executor"):
+            executors.append(server._monitor._executor)
+        if hasattr(server._monitor, "_rtt_monitor"):
+            executors.append(server._monitor._rtt_monitor._executor)
+    executors.append(topology._Topology__events_executor)
+    if topology._srv_monitor:
+        executors.append(topology._srv_monitor._executor)
+
+    return [e for e in executors if e is not None]
+
+
+def print_running_topology(topology):
+    running = [e for e in _get_executors(topology) if not e._stopped]
+    if running:
+        print(
+            "WARNING: found Topology with running threads:\n"
+            f"  Threads: {running}\n"
+            f"  Topology: {topology}\n"
+            f"  Creation traceback:\n{topology._settings._stack}"
+        )
+
+
+def print_running_clients():
+    from pymongo.topology import Topology
+
+    processed = set()
+    # Avoid false positives on the main test client.
+    # XXX: Can be removed after PYTHON-1634 or PYTHON-1896.
    c = client_context.client
-    c.drop_database("pymongo-pooling-tests")
-    c.drop_database("pymongo_test")
-    c.drop_database("pymongo_test1")
-    c.drop_database("pymongo_test2")
-    c.drop_database("pymongo_test_mike")
-    c.drop_database("pymongo_test_bernie")
-
-
-class PymongoTestRunner(unittest.TextTestRunner):
-    def run(self, test):
-        setup()
-        result = super(PymongoTestRunner, self).run(test)
+    if c:
+        processed.add(c._topology._topology_id)
+    # Call collect to manually cleanup any would-be gc'd clients to avoid
+    # false positives.
+    gc.collect()
+    for obj in gc.get_objects():
        try:
-            teardown()
-        finally:
-            return result
+            if isinstance(obj, Topology):
+                # Avoid printing the same Topology multiple times.
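+                # (_topology_id is unique per Topology instance, so the set is a reliable guard).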
+ if obj._topology_id in processed: + continue + print_running_topology(obj) + processed.add(obj._topology_id) + except ReferenceError: + pass + + +def teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + c = client_context.client + if c: + if not client_context.is_data_lake: + c.drop_database("pymongo-pooling-tests") + c.drop_database("pymongo_test") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + c.drop_database("pymongo_test_mike") + c.drop_database("pymongo_test_bernie") + c.close() + + print_running_clients() def test_cases(suite): @@ -806,13 +1232,34 @@ def test_cases(suite): yield suite_or_case else: # unittest.TestSuite - for case in test_cases(suite_or_case): - yield case + yield from test_cases(suite_or_case) # Helper method to workaround https://bugs.python.org/issue21724 def clear_warning_registry(): """Clear the __warningregistry__ for all modules.""" - for name, module in list(sys.modules.items()): + for _, module in list(sys.modules.items()): if hasattr(module, "__warningregistry__"): - setattr(module, "__warningregistry__", {}) + module.__warningregistry__ = {} # type:ignore[attr-defined] + + +class SystemCertsPatcher: + def __init__(self, ca_certs): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") + # Tell OpenSSL where CA certificates live. + os.environ["SSL_CERT_FILE"] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop("SSL_CERT_FILE") + else: + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 4241f59232..2c45241ea8 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -13,57 +13,99 @@ # limitations under the License. 
"""Test connections to various Atlas cluster types.""" +from __future__ import annotations import os -import ssl import sys import unittest +from collections import defaultdict sys.path[0:0] = [""] import pymongo - - -_REPL = os.environ.get("ATLAS_REPL") -_SHRD = os.environ.get("ATLAS_SHRD") -_FREE = os.environ.get("ATLAS_FREE") -_TLS11 = os.environ.get("ATLAS_TLS11") -_TLS12 = os.environ.get("ATLAS_TLS12") - - -def _connect(uri): +from pymongo.ssl_support import HAS_SNI + +URIS = { + "ATLAS_REPL": os.environ.get("ATLAS_REPL"), + "ATLAS_SHRD": os.environ.get("ATLAS_SHRD"), + "ATLAS_FREE": os.environ.get("ATLAS_FREE"), + "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), + "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), + "ATLAS_SERVERLESS": os.environ.get("ATLAS_SERVERLESS"), + "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), + "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), + "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), + "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), + "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), + "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), +} + + +def connect(uri): + if not uri: + raise Exception("Must set env variable to test.") client = pymongo.MongoClient(uri) # No TLS error - client.admin.command('ismaster') + client.admin.command("ping") # No auth error client.test.test.count_documents({}) class TestAtlasConnect(unittest.TestCase): - - @classmethod - def setUpClass(cls): - if not all([_REPL, _SHRD, _FREE]): - raise Exception( - "Must set ATLAS_REPL/SHRD/FREE env variables to test.") + @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + def test_free_tier(self): + connect(URIS["ATLAS_FREE"]) def test_replica_set(self): - _connect(_REPL) + connect(URIS["ATLAS_REPL"]) def test_sharded_cluster(self): - _connect(_SHRD) - - def test_free_tier(self): - if not getattr(ssl, 'HAS_SNI', False): - raise unittest.SkipTest("Free tier requires SNI support.") - _connect(_FREE) + connect(URIS["ATLAS_SHRD"]) def test_tls_11(self): - _connect(_TLS11) + connect(URIS["ATLAS_TLS11"]) def test_tls_12(self): - _connect(_TLS12) + connect(URIS["ATLAS_TLS12"]) + + def test_serverless(self): + connect(URIS["ATLAS_SERVERLESS"]) + + def connect_srv(self, uri): + connect(uri) + self.assertIn("mongodb+srv://", uri) + + @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + def test_srv_free_tier(self): + self.connect_srv(URIS["ATLAS_SRV_FREE"]) + + def test_srv_replica_set(self): + self.connect_srv(URIS["ATLAS_SRV_REPL"]) + + def test_srv_sharded_cluster(self): + self.connect_srv(URIS["ATLAS_SRV_SHRD"]) + + def test_srv_tls_11(self): + self.connect_srv(URIS["ATLAS_SRV_TLS11"]) + + def test_srv_tls_12(self): + self.connect_srv(URIS["ATLAS_SRV_TLS12"]) + + def test_srv_serverless(self): + self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) + + def test_uniqueness(self): + """Ensure that we don't accidentally duplicate the test URIs.""" + uri_to_names = defaultdict(list) + for name, uri in URIS.items(): + if uri: + uri_to_names[uri].append(name) + duplicates = [names for names in uri_to_names.values() if len(names) > 1] + self.assertFalse( + duplicates, + f"Error: the following env variables have duplicate values: {duplicates}", + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json deleted file mode 100644 index 820ad853c7..0000000000 --- a/test/auth/connection-string.json +++ /dev/null @@ -1,453 +0,0 @@ -{ - "tests": [ - { - 
"description": "should use the default source and mechanism", - "uri": "mongodb://user:password@localhost", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "admin" - }, - "options": null - }, - { - "description": "should use the database when no authSource is specified", - "uri": "mongodb://user:password@localhost/foo", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "foo" - }, - "options": null - }, - { - "description": "should use the authSource when specified", - "uri": "mongodb://user:password@localhost/foo?authSource=bar", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "bar" - }, - "options": null - }, - { - "description": "should recognise the mechanism (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" - } - }, - { - "description": "should ignore the database (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" - } - }, - { - "description": "should accept valid authSource (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" - } - }, - { - "description": "should accept generic mechanism property (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI", - "authmechanismproperties": { - "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true - } - } - }, - { - "description": "should accept the password (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user@DOMAIN.COM", - "password": "password", - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" - } - }, - { - "description": "may support deprecated gssapiServiceName option (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&gssapiServiceName=other", - "hosts": null, - "valid": true, - "warning": false, - "optional": true, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI", - "authmechanismproperties": { - "SERVICE_NAME": "other" - } - } - }, - { - "description": "should throw an exception if authSource is invalid (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should throw an exception if no username (GSSAPI)", - "uri": 
"mongodb://localhost/?authMechanism=GSSAPI", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should recognize the mechanism (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "should use the database when no authSource is specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "should use the authSource when specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "should throw an exception if no username is supplied (MONGODB-CR)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should recognize the mechanism (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" - } - }, - { - "description": "should ignore the database (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" - } - }, - { - "description": "should accept valid authSource (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" - } - }, - { - "description": "should recognize the mechanism with no username (MONGODB-X509)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": null, - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" - } - }, - { - "description": "should throw an exception if supplied a password (MONGODB-X509)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should throw an exception if authSource is invalid (MONGODB-X509)", - "uri": 
"mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should recognize the mechanism (PLAIN)", - "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "$external" - }, - "options": { - "authmechanism": "PLAIN" - } - }, - { - "description": "should use the database when no authSource is specified (PLAIN)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "PLAIN" - } - }, - { - "description": "should use the authSource when specified (PLAIN)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "PLAIN" - } - }, - { - "description": "should throw an exception if no username (PLAIN)", - "uri": "mongodb://localhost/?authMechanism=PLAIN", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should recognize the mechanism (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" - } - }, - { - "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" - } - }, - { - "description": "should accept valid authSource (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" - } - }, - { - "description": "should throw an exception if no username (SCRAM-SHA-1)", - "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - }, - { - "description": "should recognize the mechanism (SCRAM-SHA-256)", - "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" - } - }, - { - "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" - } - }, - { - "description": "should accept valid authSource (SCRAM-SHA-256)", - "uri": 
"mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", - "hosts": null, - "valid": true, - "warning": false, - "auth": { - "username": "user", - "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" - } - }, - { - "description": "should throw an exception if no username (SCRAM-SHA-256)", - "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null - } - ] -} diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json new file mode 100644 index 0000000000..0463a5141e --- /dev/null +++ b/test/auth/legacy/connection-string.json @@ -0,0 +1,553 @@ +{ + "tests": [ + { + "description": "should use the default source and mechanism", + "uri": "mongodb://user:password@localhost", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified", + "uri": "mongodb://user:password@localhost/foo", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified", + "uri": "mongodb://user:password@localhost/foo?authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should ignore the database (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept valid authSource (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept generic mechanism property (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": true + } + } + }, + { + "description": "should accept the password (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": "password", + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "must raise an error when the 
authSource is empty", + "uri": "mongodb://user:password@localhost/foo?authSource=", + "valid": false + }, + { + "description": "must raise an error when the authSource is empty without credentials", + "uri": "mongodb://localhost/admin?authSource=", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", + "valid": false + }, + { + "description": "should throw an exception if no username (GSSAPI)", + "uri": "mongodb://localhost/?authMechanism=GSSAPI", + "valid": false + }, + { + "description": "should recognize the mechanism (MONGODB-CR)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "MONGODB-CR", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (MONGODB-CR)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "MONGODB-CR", + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified (MONGODB-CR)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "MONGODB-CR", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username is supplied (MONGODB-CR)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", + "valid": false + }, + { + "description": "should recognize the mechanism (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should ignore the database (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username when auth 
source is explicitly specified (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if supplied a password (MONGODB-X509)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", + "valid": false + }, + { + "description": "should recognize the mechanism (PLAIN)", + "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (PLAIN)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified (PLAIN)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (PLAIN)", + "uri": "mongodb://localhost/?authMechanism=PLAIN", + "valid": false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-1)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", + "valid": false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + 
"username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-256)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", + "valid": false + }, + { + "description": "URI with no auth-related info doesn't create credential", + "uri": "mongodb://localhost/", + "valid": true, + "credential": null + }, + { + "description": "database in URI path doesn't create credentials", + "uri": "mongodb://localhost/foo", + "valid": true, + "credential": null + }, + { + "description": "authSource without username doesn't create credential (default mechanism)", + "uri": "mongodb://localhost/?authSource=foo", + "valid": true, + "credential": null + }, + { + "description": "should throw an exception if no username provided (userinfo implies default mechanism)", + "uri": "mongodb://@localhost.com/", + "valid": false + }, + { + "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", + "uri": "mongodb://:@localhost.com/", + "valid": false + }, + { + "description": "should recognise the mechanism (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if username and no password (MONGODB-AWS)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", + "valid": false, + "credential": null + }, + { + "description": "should use username and password if specified (MONGODB-AWS)", + "uri": "mongodb://user%21%40%23%24%25%5E%26%2A%28%29_%2B:pass%21%40%23%24%25%5E%26%2A%28%29_%2B@localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": "user!@#$%^&*()_+", + "password": "pass!@#$%^&*()_+", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should use username, password and session token if specified (MONGODB-AWS)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:token%21%40%23%24%25%5E%26%2A%28%29_%2B", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": { + "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" + } + } + }, + { + "description": "should recognise the mechanism and request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + 
"mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism and username with request callback (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": "principalName", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": "aws" + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": "aws" + } + } + }, + { + "description": "should throw an exception if username and password are specified (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username and deviceName are specified (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&PROVIDER_NAME:gcp", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified deviceName is not supported (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:unexisted", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither deviceName nor callback specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null + } + ] +} diff --git a/test/auth/unified/reauthenticate_with_retry.json b/test/auth/unified/reauthenticate_with_retry.json new file mode 100644 index 0000000000..ef110562ed --- /dev/null +++ b/test/auth/unified/reauthenticate_with_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_with_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": true, + "retryWrites": true + 
}, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth/unified/reauthenticate_without_retry.json b/test/auth/unified/reauthenticate_without_retry.json new file mode 100644 index 0000000000..6fded47634 --- /dev/null +++ b/test/auth/unified/reauthenticate_without_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_without_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=false", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=false", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py new file mode 100644 index 0000000000..d0bb41b739 --- /dev/null +++ b/test/auth_aws/test_auth_aws.py @@ -0,0 +1,209 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test MONGODB-AWS Authentication.""" +from __future__ import annotations + +import os +import sys +import unittest +from unittest.mock import patch + +sys.path[0:0] = [""] + +from pymongo_auth_aws import AwsCredential, auth + +from pymongo import MongoClient +from pymongo.errors import OperationFailure +from pymongo.uri_parser import parse_uri + + +class TestAuthAWS(unittest.TestCase): + uri: str + + @classmethod + def setUpClass(cls): + cls.uri = os.environ["MONGODB_URI"] + + def test_should_fail_without_credentials(self): + if "@" not in self.uri: + self.skipTest("MONGODB_URI already has no credentials") + + hosts = ["{}:{}".format(*addr) for addr in parse_uri(self.uri)["nodelist"]] + self.assertTrue(hosts) + with MongoClient(hosts) as client: + with self.assertRaises(OperationFailure): + client.aws.test.find_one() + + def test_should_fail_incorrect_credentials(self): + with MongoClient( + self.uri, username="fake", password="fake", authMechanism="MONGODB-AWS" + ) as client: + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + def test_connect_uri(self): + with MongoClient(self.uri) as client: + client.get_database().test.find_one() + + def setup_cache(self): + if os.environ.get("AWS_ACCESS_KEY_ID", None) or "@" in self.uri: + self.skipTest("Not testing cached credentials") + if not hasattr(auth, "set_cached_credentials"): + self.skipTest("Cached credentials not available") + + # Ensure cleared credentials. + auth.set_cached_credentials(None) + self.assertEqual(auth.get_cached_credentials(), None) + + client = MongoClient(self.uri) + client.get_database().test.find_one() + client.close() + return auth.get_cached_credentials() + + def test_cache_credentials(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + + def test_cache_about_to_expire(self): + creds = self.setup_cache() + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Make the creds about to expire. + creds = auth.get_cached_credentials() + assert creds is not None + + creds = AwsCredential(creds.username, creds.password, creds.token, lambda x: True) + auth.set_cached_credentials(creds) + + client.get_database().test.find_one() + new_creds = auth.get_cached_credentials() + self.assertNotEqual(creds, new_creds) + + def test_poisoned_cache(self): + creds = self.setup_cache() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Poison the creds with invalid password. + assert creds is not None + creds = AwsCredential("a" * 24, "b" * 24, "c" * 24) + auth.set_cached_credentials(creds) + + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + # Make sure the cache was cleared. + self.assertEqual(auth.get_cached_credentials(), None) + + # The next attempt should generate a new cred and succeed. 
+ client.get_database().test.find_one() + self.assertNotEqual(auth.get_cached_credentials(), None) + + def test_environment_variables_ignored(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + os.environ.copy() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + client.get_database().test.find_one() + + self.assertIsNotNone(auth.get_cached_credentials()) + + mock_env = { + "AWS_ACCESS_KEY_ID": "foo", + "AWS_SECRET_ACCESS_KEY": "bar", + "AWS_SESSION_TOKEN": "baz", + } + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client.get_database().test.find_one() + + auth.set_cached_credentials(None) + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + with self.assertRaises(OperationFailure): + client2.get_database().test.find_one() + + def test_no_cache_environment_variables(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + auth.set_cached_credentials(None) + + mock_env = {"AWS_ACCESS_KEY_ID": creds.username, "AWS_SECRET_ACCESS_KEY": creds.password} + if creds.token: + mock_env["AWS_SESSION_TOKEN"] = creds.token + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + with patch.dict(os.environ, mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) + client.get_database().test.find_one() + + self.assertIsNone(auth.get_cached_credentials()) + + mock_env["AWS_ACCESS_KEY_ID"] = "foo" + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client2.get_database().test.find_one() + + +class TestAWSLambdaExamples(unittest.TestCase): + def test_shared_client(self): + # Start AWS Lambda Example 1 + import os + + from pymongo import MongoClient + + client = MongoClient(host=os.environ["MONGODB_URI"]) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 1 + + def test_IAM_auth(self): + # Start AWS Lambda Example 2 + import os + + from pymongo import MongoClient + + client = MongoClient( + host=os.environ["MONGODB_URI"], + authSource="$external", + authMechanism="MONGODB-AWS", + ) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 2 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py new file mode 100644 index 0000000000..29de512da7 --- /dev/null +++ b/test/auth_oidc/test_auth_oidc.py @@ -0,0 +1,573 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +from contextlib import contextmanager +from typing import Dict + +sys.path[0:0] = [""] + +from test.utils import EventListener + +from bson import SON +from pymongo import MongoClient +from pymongo.auth import _AUTH_MAP, _authenticate_oidc +from pymongo.cursor import CursorType +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne + +# Force MONGODB-OIDC to be enabled. +_AUTH_MAP["MONGODB-OIDC"] = _authenticate_oidc # type:ignore + + +class TestAuthOIDC(unittest.TestCase): + uri: str + + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ["MONGODB_URI_MULTI"] + cls.uri_admin = os.environ["MONGODB_URI"] + cls.token_dir = os.environ["OIDC_TOKEN_DIR"] + + def setUp(self): + self.request_called = 0 + + def create_request_cb(self, username="test_user1", sleep=0): + + token_file = os.path.join(self.token_dir, username).replace(os.sep, "/") + + def request_token(server_info, context): + # Validate the info. + self.assertIn("issuer", server_info) + self.assertIn("clientId", server_info) + + # Validate the timeout. + timeout_seconds = context["timeout_seconds"] + self.assertEqual(timeout_seconds, 60 * 5) + with open(token_file) as fid: + token = fid.read() + resp = {"access_token": token, "refresh_token": token} + + time.sleep(sleep) + self.request_called += 1 + return resp + + return request_token + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = MongoClient(self.uri_admin) + client.admin.command(cmd_on) + try: + yield + finally: + client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + + def test_connect_request_callback_single_implicit_username(self): + request_token = self.create_request_cb() + props: Dict = {"request_token_callback": request_token} + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_request_callback_single_explicit_username(self): + request_token = self.create_request_cb() + props: Dict = {"request_token_callback": request_token} + client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_request_callback_multiple_principal_user1(self): + request_token = self.create_request_cb() + props: Dict = {"request_token_callback": request_token} + client = MongoClient( + self.uri_multiple, username="test_user1", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_request_callback_multiple_principal_user2(self): + request_token = self.create_request_cb("test_user2") + props: Dict = {"request_token_callback": request_token} + client = MongoClient( + self.uri_multiple, username="test_user2", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_request_callback_multiple_no_username(self): + request_token = self.create_request_cb() + props: Dict = {"request_token_callback": request_token} + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + with self.assertRaises(OperationFailure): + client.test.test.find_one() + client.close() + + def 
test_allowed_hosts_blocked(self): + request_token = self.create_request_cb() + props: Dict = {"request_token_callback": request_token, "allowed_hosts": []} + client = MongoClient(self.uri_single, authmechanismproperties=props) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + props: Dict = {"request_token_callback": request_token, "allowed_hosts": ["example.com"]} + client = MongoClient( + self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False + ) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + def test_valid_request_token_callback(self): + request_cb = self.create_request_cb() + + props: Dict = { + "request_token_callback": request_cb, + } + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_request_callback_returns_null(self): + def request_token_null(a, b): + return None + + props: Dict = {"request_token_callback": request_token_null} + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + def request_token_invalid(a, b): + return {} + + props: Dict = {"request_token_callback": request_token_invalid} + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def request_cb_extra_value(server_info, context): + result = self.create_request_cb()(server_info, context) + result["foo"] = "bar" + return result + + props: Dict = {"request_token_callback": request_cb_extra_value} + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_speculative_auth_success(self): + request_token = self.create_request_cb() + + # Create a client with a request callback that returns a valid token. + props: Dict = {"request_token_callback": request_token} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + # Close the client. + client.close() + + def test_reauthenticate_succeeds(self): + listener = EventListener() + + # Create request callback that returns valid credentials. + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient( + self.uri_single, event_listeners=[listener], authmechanismproperties=props + ) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + listener.reset() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. 
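+ # The server fails this find with error code 391 (ReauthenticationRequired).
+ # The driver should invoke the request callback again and retry the
+ # command, so the find below still succeeds.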
+ client.test.test.find_one() + + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + self.assertEqual(succeeded_events, ["find"]) + self.assertEqual(failed_events, ["find"]) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_no_refresh(self): + cb = self.create_request_cb() + + def request_cb(*args, **kwargs): + result = cb(*args, **kwargs) + del result["refresh_token"] + return result + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_after_refresh_fails(self): + + # Create request callback that returns valid credentials. + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["find", "saslContinue"], "errorCode": 391}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called three times. + self.assertEqual(self.request_called, 3) + + def test_reauthenticate_fails(self): + + # Create request callback that returns valid credentials. + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_bulk_write(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. 
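+ # (Only the initial authentication has run the callback so far.)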
+ self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_bulk_read(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + list(cursor) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_cursor(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_get_more(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_get_more_exhaust(self): + # Ensure no mongos + props = {"request_token_callback": self.create_request_cb()} + client = MongoClient(self.uri_single, authmechanismproperties=props) + hello = client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") != "isdbgrid": + raise unittest.SkipTest("Must not be a mongos") + + request_cb = self.create_request_cb() + + # Create a client with the callback. + props: Dict = {"request_token_callback": request_cb} + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. 
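+ # Insert two documents so the find below (batch_size=1) has to issue a
+ # getMore, the command targeted by the fail point.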
+ client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["getMore"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation.
+ cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST)
+ self.assertGreaterEqual(len(list(cursor)), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ client.close()
+
+ def test_reauthenticate_succeeds_command(self):
+ request_cb = self.create_request_cb()
+
+ # Create a client with the callback.
+ props: Dict = {"request_token_callback": request_cb}
+
+ client = MongoClient(self.uri_single, authmechanismproperties=props)
+
+ # Perform an insert operation.
+ client.test.test.insert_one({"a": 1})
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["count"], "errorCode": 391},
+ }
+ ):
+ # Perform a count operation.
+ result = client.test.command({"count": "test"})
+
+ self.assertGreaterEqual(len(list(result)), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ client.close()
+
+ def test_reauthentication_succeeds_multiple_connections(self):
+ request_cb = self.create_request_cb()
+
+ # Create a client with the callback.
+ props: Dict = {"request_token_callback": request_cb}
+
+ client1 = MongoClient(self.uri_single, authmechanismproperties=props)
+ client2 = MongoClient(self.uri_single, authmechanismproperties=props)
+
+ # Perform an insert operation.
+ client1.test.test.insert_many([{"a": 1}, {"a": 1}])
+ client2.test.test.find_one()
+ self.assertEqual(self.request_called, 2)
+
+ # Use the same authenticator for both clients
+ # to simulate a race condition with separate connections.
+ # We should only see one extra callback despite both connections
+ # needing to reauthenticate.
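+ # Sharing the cached credentials below means a token refreshed through
+ # client1's callback is immediately visible to client2, so client2 can
+ # reauthenticate without another callback invocation.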
+ client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + client1.close() + client2.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/bson_corpus/array.json b/test/bson_corpus/array.json index 1c654cf36b..9ff953e5ae 100644 --- a/test/bson_corpus/array.json +++ b/test/bson_corpus/array.json @@ -14,16 +14,22 @@ "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" }, { - "description": "Single Element Array with index set incorrectly", + "description": "Single Element Array with index set incorrectly to empty string", "degenerate_bson": "130000000461000B00000010000A0000000000", "canonical_bson": "140000000461000C0000001030000A0000000000", "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" }, { - "description": "Single Element Array with index set incorrectly", + "description": "Single Element Array with index set incorrectly to ab", "degenerate_bson": "150000000461000D000000106162000A0000000000", "canonical_bson": "140000000461000C0000001030000A0000000000", "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Multi Element Array with duplicate indexes", + "degenerate_bson": "1b000000046100130000001030000a000000103000140000000000", + "canonical_bson": "1b000000046100130000001030000a000000103100140000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}, {\"$numberInt\": \"20\"}]}" } ], "decodeErrors": [ diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json index 90a15c1a1c..20aaef743b 100644 --- a/test/bson_corpus/binary.json +++ b/test/bson_corpus/binary.json @@ -39,11 +39,27 @@ "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}" }, + { + "description": "subtype 0x04 UUID", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}", + "degenerate_extjson": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}" + }, { "description": "subtype 0x05", "canonical_bson": "1D000000057800100000000573FFD26444B34C6990E8E7D1DFC035D400", "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"05\"}}}" }, + { + "description": "subtype 0x07", + "canonical_bson": "1D000000057800100000000773FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"07\"}}}" + }, + { + "description": "subtype 0x08", + "canonical_bson": "1D000000057800100000000873FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"08\"}}}" + }, { "description": "subtype 0x80", "canonical_bson": "0F0000000578000200000080FFFF00", @@ -81,5 +97,27 @@ "description": "subtype 0x02 length negative one", "bson": 
"130000000578000600000002FFFFFFFFFFFF00" } + ], + "parseErrors": [ + { + "description": "$uuid wrong type", + "string": "{\"x\" : { \"$uuid\" : { \"data\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}}" + }, + { + "description": "$uuid invalid value--too short", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too long", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4-789e4\"}}" + }, + { + "description": "$uuid invalid value--misplaced hyphens", + "string": "{\"x\" : { \"$uuid\" : \"73ff-d26444b-34c6-990e8e-7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too many hyphens", + "string": "{\"x\" : { \"$uuid\" : \"----d264-44b3-4--9-90e8-e7d1dfc0----\"}}" + } ] } diff --git a/test/bson_corpus/code.json b/test/bson_corpus/code.json index 6f37349ad0..b8482b2541 100644 --- a/test/bson_corpus/code.json +++ b/test/bson_corpus/code.json @@ -20,48 +20,48 @@ }, { "description": "two-byte UTF-8 (\u00e9)", - "canonical_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", - "canonical_extjson": "{\"a\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}" + "canonical_bson": "190000000D61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}}" }, { "description": "three-byte UTF-8 (\u2606)", - "canonical_bson": "190000000261000D000000E29886E29886E29886E298860000", - "canonical_extjson": "{\"a\" : \"\\u2606\\u2606\\u2606\\u2606\"}" + "canonical_bson": "190000000D61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u2606\\u2606\\u2606\\u2606\"}}" }, { "description": "Embedded nulls", - "canonical_bson": "190000000261000D0000006162006261620062616261620000", - "canonical_extjson": "{\"a\" : \"ab\\u0000bab\\u0000babab\"}" + "canonical_bson": "190000000D61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"ab\\u0000bab\\u0000babab\"}}" } ], "decodeErrors": [ { "description": "bad code string length: 0 (but no 0x00 either)", - "bson": "0C0000000261000000000000" + "bson": "0C0000000D61000000000000" }, { "description": "bad code string length: -1", - "bson": "0C000000026100FFFFFFFF00" + "bson": "0C0000000D6100FFFFFFFF00" }, { "description": "bad code string length: eats terminator", - "bson": "10000000026100050000006200620000" + "bson": "100000000D6100050000006200620000" }, { "description": "bad code string length: longer than rest of document", - "bson": "120000000200FFFFFF00666F6F6261720000" + "bson": "120000000D00FFFFFF00666F6F6261720000" }, { "description": "code string is not null-terminated", - "bson": "1000000002610004000000616263FF00" + "bson": "100000000D610004000000616263FF00" }, { "description": "empty code string, but extra null", - "bson": "0E00000002610001000000000000" + "bson": "0E0000000D610001000000000000" }, { "description": "invalid UTF-8", - "bson": "0E00000002610002000000E90000" + "bson": "0E0000000D610002000000E90000" } ] } diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json index 60506ce174..f857afdc36 100644 --- a/test/bson_corpus/datetime.json +++ b/test/bson_corpus/datetime.json @@ -25,6 +25,12 @@ "description" : "Y10K", "canonical_bson" : "1000000009610000DC1FD277E6000000", "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" + }, + { + "description": "leading zero ms", + "canonical_bson": "10000000096100D1D6D6CC3B01000000", + "relaxed_extjson": "{\"a\" : 
{\"$date\" : \"2012-12-24T12:15:30.001Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330001\"}}}" } ], "decodeErrors": [ diff --git a/test/bson_corpus/dbref.json b/test/bson_corpus/dbref.json index 1fe12c6f68..41c0b09d0e 100644 --- a/test/bson_corpus/dbref.json +++ b/test/bson_corpus/dbref.json @@ -1,5 +1,5 @@ { - "description": "DBRef", + "description": "Document type (DBRef sub-documents)", "bson_type": "0x03", "valid": [ { @@ -26,6 +26,26 @@ "description": "Document with key names similar to those of a DBRef", "canonical_bson": "3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000", "canonical_extjson": "{\"$ref\": \"not-a-dbref\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$banana\": \"peel\"}" + }, + { + "description": "DBRef with additional dollar-prefixed and dotted fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e10612e62000100000010246300010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"a.b\": {\"$numberInt\": \"1\"}, \"$c\": {\"$numberInt\": \"1\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $id is missing", + "canonical_bson": "26000000036462726566001a0000000224726566000b000000636f6c6c656374696f6e000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\"}}" + }, + { + "description": "Sub-document resembles DBRef but $ref is not a string", + "canonical_bson": "2c000000036462726566002000000010247265660001000000072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $db is not a string", + "canonical_bson": "4000000003646272656600340000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e1024646200010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": {\"$numberInt\": \"1\"}}}" } ] } diff --git a/test/bson_corpus/decimal128-2.json b/test/bson_corpus/decimal128-2.json index de73b86ffb..316d3b0e61 100644 --- a/test/bson_corpus/decimal128-2.json +++ b/test/bson_corpus/decimal128-2.json @@ -790,3 +790,4 @@ } ] } + diff --git a/test/bson_corpus/decimal128-5.json b/test/bson_corpus/decimal128-5.json index 778bf96c4b..e976eae407 100644 --- a/test/bson_corpus/decimal128-5.json +++ b/test/bson_corpus/decimal128-5.json @@ -399,3 +399,4 @@ } ] } + diff --git a/test/bson_corpus/document.json b/test/bson_corpus/document.json index 3ec9187044..698e7ae90a 100644 --- a/test/bson_corpus/document.json +++ b/test/bson_corpus/document.json @@ -17,6 +17,26 @@ "description": "Single-character key subdoc", "canonical_bson": "160000000378000E0000000261000200000062000000", "canonical_extjson": "{\"x\" : {\"a\" : \"b\"}}" + }, + { + "description": "Dollar-prefixed key in sub-document", + "canonical_bson": "170000000378000F000000022461000200000062000000", + "canonical_extjson": "{\"x\" : {\"$a\" : \"b\"}}" + }, + { + "description": "Dollar as key in sub-document", + "canonical_bson": "160000000378000E0000000224000200000061000000", + "canonical_extjson": "{\"x\" : {\"$\" : \"a\"}}" + }, + { + "description": "Dotted key in sub-document", + "canonical_bson": "180000000378001000000002612E62000200000063000000", + 
"canonical_extjson": "{\"x\" : {\"a.b\" : \"c\"}}" + }, + { + "description": "Dot as key in sub-document", + "canonical_bson": "160000000378000E000000022E000200000061000000", + "canonical_extjson": "{\"x\" : {\".\" : \"a\"}}" } ], "decodeErrors": [ @@ -31,6 +51,10 @@ { "description": "Invalid subdocument: bad string length in field", "bson": "1C00000003666F6F001200000002626172000500000062617A000000" + }, + { + "description": "Null byte in sub-document key", + "bson": "150000000378000D00000010610000010000000000" } ] } diff --git a/test/bson_corpus/double.json b/test/bson_corpus/double.json index d13fd5c471..d5b8fb3d7e 100644 --- a/test/bson_corpus/double.json +++ b/test/bson_corpus/double.json @@ -28,16 +28,16 @@ "relaxed_extjson": "{\"d\" : -1.0001220703125}" }, { - "description": "1.2345678901234568e+18", - "canonical_bson": "1000000001640081E97DF41022B14300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678901234568e+18\"}}", - "relaxed_extjson": "{\"d\" : 1.2345678901234568E+18}" + "description": "1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b14300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232E+18\"}}", + "relaxed_extjson": "{\"d\" : 1.2345678921232E+18}" }, { - "description": "-1.2345678901234568e+18", - "canonical_bson": "1000000001640081E97DF41022B1C300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678901234568e+18\"}}", - "relaxed_extjson": "{\"d\" : -1.2345678901234568e+18}" + "description": "-1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b1c300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232E+18\"}}", + "relaxed_extjson": "{\"d\" : -1.2345678921232E+18}" }, { "description": "0.0", diff --git a/test/bson_corpus/multi-type-deprecated.json b/test/bson_corpus/multi-type-deprecated.json index e804e23c8a..665f388cd4 100644 --- a/test/bson_corpus/multi-type-deprecated.json +++ b/test/bson_corpus/multi-type-deprecated.json @@ -5,10 +5,11 @@ "valid": [ { "description": "All BSON types", - "canonical_bson": "3B020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000E00000064622E636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", - "converted_bson": 
"4b020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002e0000000224726566000e00000064622e636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", - "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"db.collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", - "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": 
{\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"db.collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" + "canonical_bson": "38020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000B000000636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", + "converted_bson": "48020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002b0000000224726566000b000000636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", + "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": 
{\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", + "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" } ] } + diff --git a/test/bson_corpus/regex.json b/test/bson_corpus/regex.json index c62b019cdf..223802169d 100644 --- a/test/bson_corpus/regex.json +++ b/test/bson_corpus/regex.json @@ -54,11 +54,11 @@ ], "decodeErrors": [ { - "description": "embedded null in pattern", + "description": "Null byte in pattern string", "bson": "0F0000000B610061006300696D0000" }, { - "description": "embedded null in flags", + "description": "Null byte in flags string", "bson": "100000000B61006162630069006D0000" } ] diff --git a/test/bson_corpus/symbol.json b/test/bson_corpus/symbol.json index 4e46cb9511..3dd3577ebd 100644 --- a/test/bson_corpus/symbol.json +++ b/test/bson_corpus/symbol.json @@ -50,31 +50,31 @@ "decodeErrors": [ { "description": "bad symbol length: 0 (but no 0x00 either)", - "bson": "0C0000000261000000000000" + "bson": "0C0000000E61000000000000" }, { "description": "bad symbol length: -1", - "bson": "0C000000026100FFFFFFFF00" + "bson": "0C0000000E6100FFFFFFFF00" }, { "description": "bad symbol length: eats terminator", - "bson": "10000000026100050000006200620000" + "bson": "100000000E6100050000006200620000" }, { "description": "bad symbol length: longer than rest of document", - "bson": "120000000200FFFFFF00666F6F6261720000" + "bson": "120000000E00FFFFFF00666F6F6261720000" }, { "description": "symbol is not null-terminated", - "bson": "1000000002610004000000616263FF00" + "bson": "100000000E610004000000616263FF00" }, { "description": "empty symbol, 
but extra null", - "bson": "0E00000002610001000000000000" + "bson": "0E0000000E610001000000000000" }, { "description": "invalid UTF-8", - "bson": "0E00000002610002000000E90000" + "bson": "0E0000000E610002000000E90000" } ] } diff --git a/test/bson_corpus/timestamp.json b/test/bson_corpus/timestamp.json index c76bc2998e..6f46564a32 100644 --- a/test/bson_corpus/timestamp.json +++ b/test/bson_corpus/timestamp.json @@ -18,6 +18,11 @@ "description": "Timestamp with high-order bit set on both seconds and increment", "canonical_bson": "10000000116100FFFFFFFFFFFFFFFF00", "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4294967295, \"i\" : 4294967295} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment (not UINT32_MAX)", + "canonical_bson": "1000000011610000286BEE00286BEE00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4000000000, \"i\" : 4000000000} } }" } ], "decodeErrors": [ diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json index 68b51195ab..9c649b5e3f 100644 --- a/test/bson_corpus/top.json +++ b/test/bson_corpus/top.json @@ -3,9 +3,24 @@ "bson_type": "0x00", "valid": [ { - "description": "Document with keys that start with $", + "description": "Dollar-prefixed key in top-level document", "canonical_bson": "0F00000010246B6579002A00000000", "canonical_extjson": "{\"$key\": {\"$numberInt\": \"42\"}}" + }, + { + "description": "Dollar as key in top-level document", + "canonical_bson": "0E00000002240002000000610000", + "canonical_extjson": "{\"$\": \"a\"}" + }, + { + "description": "Dotted key in top-level document", + "canonical_bson": "1000000002612E620002000000630000", + "canonical_extjson": "{\"a.b\": \"c\"}" + }, + { + "description": "Dot as key in top-level document", + "canonical_bson": "0E000000022E0002000000610000", + "canonical_extjson": "{\".\": \"a\"}" } ], "decodeErrors": [ @@ -64,28 +79,32 @@ { "description": "Document truncated mid-key", "bson": "1200000002666F" + }, + { + "description": "Null byte in document key", + "bson": "0D000000107800000100000000" } ], "parseErrors": [ { "description" : "Bad $regularExpression (extra field)", - "string" : "{\"a\" : \"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" }, { "description" : "Bad $regularExpression (missing options field)", - "string" : "{\"a\" : \"$regularExpression\": {\"pattern\": \"abc\"}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\"}}}" }, { "description": "Bad $regularExpression (pattern is number, not string)", - "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"$options\" : \"\"}}}" + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"options\" : \"\"}}}" }, { "description": "Bad $regularExpression (options are number, not string)", - "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"$options\" : 0}}}" + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"options\" : 0}}}" }, { "description" : "Bad $regularExpression (missing pattern field)", - "string" : "{\"a\" : \"$regularExpression\": {\"options\":\"ix\"}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"options\":\"ix\"}}}" }, { "description": "Bad $oid (number, not string)", @@ -151,6 +170,10 @@ "description": "Bad $code (type is number, not string)", "string": "{\"a\" : {\"$code\" : 42}}" }, + { + "description": "Bad $code (type is 
number, not string) when $scope is also present", + "string": "{\"a\" : {\"$code\" : 42, \"$scope\" : {}}}" + }, { "description": "Bad $code (extra field)", "string": "{\"a\" : {\"$code\" : \"\", \"unrelated\": true}}" @@ -195,14 +218,6 @@ "description": "Bad $date (extra field)", "string": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}, \"unrelated\": true}}" }, - { - "description": "Bad DBRef (ref is number, not string)", - "string": "{\"x\" : {\"$ref\" : 42, \"$id\" : \"abc\"}}" - }, - { - "description": "Bad DBRef (db is number, not string)", - "string": "{\"x\" : {\"$ref\" : \"a\", \"$id\" : \"abc\", \"$db\" : 42}}" - }, { "description": "Bad $minKey (boolean, not integer)", "string": "{\"a\" : {\"$minKey\" : true}}" @@ -230,7 +245,22 @@ { "description": "Bad DBpointer (extra field)", "string": "{\"a\": {\"$dbPointer\": {\"a\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"c\": {\"$numberInt\": \"2\"}, \"$ref\": \"b\"}}}" + }, + { + "description" : "Null byte in document key", + "string" : "{\"a\\u0000\": 1 }" + }, + { + "description" : "Null byte in sub-document key", + "string" : "{\"a\" : {\"b\\u0000\": 1 }}" + }, + { + "description": "Null byte in $regularExpression pattern", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\\u0000\", \"options\" : \"i\"}}}" + }, + { + "description": "Null byte in $regularExpression options", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\", \"options\" : \"i\\u0000\"}}}" } - ] } diff --git a/test/certificates/ca.pem b/test/certificates/ca.pem index 6ac86cfcc1..24beea2d48 100644 --- a/test/certificates/ca.pem +++ b/test/certificates/ca.pem @@ -18,4 +18,4 @@ gT564CmvkUat8uXPz6olOCdwkMpJ9Sj62i0mpgXJdBfxKQ6TZ9yGz6m3jannjZpN LchB7xSAEWtqUgvNusq0dApJsf4n7jZ+oBZVaQw2+tzaMfaLqHgMwcu1FzA8UKCD sxCgIsZUs8DdxaD418Ot6nPfheOTqe24n+TTa+Z6O0W0QtnofJBx7tmAo1aEc57i 77s89pfwIJetpIlhzNSMKurCAocFCJMJLAASJFuu6dyDvPo= ------END CERTIFICATE----- \ No newline at end of file +-----END CERTIFICATE----- diff --git a/test/certificates/trusted-ca.pem b/test/certificates/trusted-ca.pem new file mode 100644 index 0000000000..a6f6f312d0 --- /dev/null +++ b/test/certificates/trusted-ca.pem @@ -0,0 +1,82 @@ +# CA bundle file used to test tlsCAFile loading for OCSP. +# Copied from the server: +# https://github.com/mongodb/mongo/blob/r4.3.4/jstests/libs/trusted-ca.pem + +# Autogenerated file, do not edit. +# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml trusted-ca.pem +# +# CA for alternate client/server certificate chain. 
+-----BEGIN CERTIFICATE----- +MIIDojCCAooCBG585gswDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxETAP +BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK +DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxHzAdBgNVBAMMFlRydXN0ZWQgS2Vy +bmVsIFRlc3QgQ0EwHhcNMTkwOTI1MjMyNzQxWhcNMzkwOTI3MjMyNzQxWjB8MQsw +CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr +IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UE +AwwWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANlRxtpMeCGhkotkjHQqgqvO6O6hoRoAGGJlDaTVtqrjmC8nwySz +1nAFndqUHttxS3A5j4enOabvffdOcV7+Z6vDQmREF6QZmQAk81pmazSc3wOnRiRs +AhXjld7i+rhB50CW01oYzQB50rlBFu+ONKYj32nBjD+1YN4AZ2tuRlbxfx2uf8Bo +Zowfr4n9nHVcWXBLFmaQLn+88WFO/wuwYUOn6Di1Bvtkvqum0or5QeAF0qkJxfhg +3a4vBnomPdwEXCgAGLvHlB41CWG09EuAjrnE3HPPi5vII8pjY2dKKMomOEYmA+KJ +AC1NlTWdN0TtsoaKnyhMMhLWs3eTyXL7kbkCAwEAAaMxMC8wDAYDVR0TBAUwAwEB +/zAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTANBgkqhkiG9w0BAQsF +AAOCAQEAQk56MO9xAhtO077COCqIYe6pYv3uzOplqjXpJ7Cph7GXwQqdFWfKls7B +cLfF/fhIUZIu5itStEkY+AIwht4mBr1F5+hZUp9KZOed30/ewoBXAUgobLipJV66 +FKg8NRtmJbiZrrC00BSO+pKfQThU8k0zZjBmNmpjxnbKZZSFWUKtbhHV1vujver6 +SXZC7R6692vLwRBMoZxhgy/FkYRdiN0U9wpluKd63eo/O02Nt6OEMyeiyl+Z3JWi +8g5iHNrBYGBbGSnDOnqV6tjEY3eq600JDWiodpA1OQheLi78pkc/VQZwof9dyBCm +6BoCskTjip/UB+vIhdPFT9sgUdgDTg== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDZUcbaTHghoZKL +ZIx0KoKrzujuoaEaABhiZQ2k1baq45gvJ8Mks9ZwBZ3alB7bcUtwOY+Hpzmm7333 +TnFe/merw0JkRBekGZkAJPNaZms0nN8Dp0YkbAIV45Xe4vq4QedAltNaGM0AedK5 +QRbvjjSmI99pwYw/tWDeAGdrbkZW8X8drn/AaGaMH6+J/Zx1XFlwSxZmkC5/vPFh +Tv8LsGFDp+g4tQb7ZL6rptKK+UHgBdKpCcX4YN2uLwZ6Jj3cBFwoABi7x5QeNQlh +tPRLgI65xNxzz4ubyCPKY2NnSijKJjhGJgPiiQAtTZU1nTdE7bKGip8oTDIS1rN3 +k8ly+5G5AgMBAAECggEAS7GjLKgT88reSzUTgubHquYf1fZwMak01RjTnsVdoboy +aMJVwzPsjgo2yEptUQvuNcGmz54cg5vJaVlmPaspGveg6WGaRmswEo/MP4GK98Fo +IFKkKM2CEHO74O14XLN/w8yFA02+IdtM3X/haEFE71VxXNmwawRXIBxN6Wp4j5Fb +mPLKIspnWQ/Y/Fn799sCFAzX5mKkbCt1IEgKssgQQEm1UkvmCkcZE+mdO/ErYP8A +COO0LpM+TK6WQY2LKiteeCCiosTZFb1GO7MkXrRP5uOBZKaW5kq1R0b6PcopJPCM +OcYF0Zli6KB7oiQLdXgU2jCaxYOnuRb6RYh2l7NvAQKBgQD6CZ9TKOn/EUQtukyw +pvYTyt1hoLXqYGcbRtLc1gcC+Z2BD28hd3eD/mEUv+g/8bq/OP4wYV9X+VRvR8xN +MmfAG/sJeOCOClz1A1TyNeA+G0GZ25qWHyHQ2W4WlSG1CXQgxGzU6wo/t6wiVW5R +O4jplFVEOXznf4vmVfBJK50R2QKBgQDegGxm23jF2N5sIYDZ14oxms8bbjPz8zH6 +tiIRYNGbSzI7J4KFGY2HiBwtf1yxS22HBL69Y1WrEzGm1vm4aZG/GUwBzI79QZAO ++YFIGaIrdlv12Zm6lpJMmAWlOs9XFirC17oQEwOQFweOdQSt7F/+HMZOigdikRBV +pK+8Kfay4QKBgQDarDevHwUmkg8yftA7Xomv3aenjkoK5KzH6jTX9kbDj1L0YG8s +sbLQuVRmNUAFTH+qZUnJPh+IbQIvIHfIu+CI3u+55QFeuCl8DqHoAr5PEr9Ys/qK +eEe2w7HIBj0oe1AYqDEWNUkNWLEuhdCpMowW3CeGN1DJlX7gvyAang4MYQKBgHwM +aWNnFQxo/oiWnTnWm2tQfgszA7AMdF7s0E2UBwhnghfMzU3bkzZuwhbznQATp3rR +QG5iRU7dop7717ni0akTN3cBTu8PcHuIy3UhJXLJyDdnG/gVHnepgew+v340E58R +muB/WUsqK8JWp0c4M8R+0mjTN47ShaLZ8EgdtTbBAoGBAKOcpuDfFEMI+YJgn8zX +h0nFT60LX6Lx+zcSDY9+6J6a4n5NhC+weYCDFOGlsLka1SwHcg1xanfrLVjpH7Ok +HDJGLrSh1FP2Rq/oFxZ/OKCjonHLa8IulqD/AA+sqYRbysKNsT3Pi0554F2xFEqQ +z/C84nlT1R2uTCWIxvrnpU2h +-----END PRIVATE KEY----- +# Pre Oct 2019 trusted-ca.pem +# Transitional pending BUILD update. 
+-----BEGIN CERTIFICATE----- +MIIDpjCCAo6gAwIBAgIDAghHMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy +dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH +TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv +cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE0NTY1NVoXDTM2MDMzMTE0NTY1NVow +fDEfMB0GA1UEAxMWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2Vy +bmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREw +DwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCePFHZTydC96SlSHSyu73vw//ddaE33kPllBB9DP2L7yRF +6D/blFmno9fSM+Dfg64VfGV+0pCXPIZbpH29nzJu0DkvHzKiWK7P1zUj8rAHaX++ +d6k0yeTLFM9v+7YE9rHoANVn22aOyDvTgAyMmA0CLn+SmUy6WObwMIf9cZn97Znd +lww7IeFNyK8sWtfsVN4yRBnjr7kKN2Qo0QmWeFa7jxVQptMJQrY8k1PcyVUOgOjQ +ocJLbWLlm9k0/OMEQSwQHJ+d9weUbKjlZ9ExOrm4QuuA2tJhb38baTdAYw3Jui4f +yD6iBAGD0Jkpc+3YaWv6CBmK8NEFkYJD/gn+lJ75AgMBAAGjMTAvMAwGA1UdEwQF +MAMBAf8wHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcN +AQEFBQADggEBADYikjB6iwAUs6sglwkE4rOkeMkJdRCNwK/5LpFJTWrDjBvBQCdA +Y5hlAVq8PfIYeh+wEuSvsEHXmx7W29X2+p4VuJ95/xBA6NLapwtzuiijRj2RBAOG +1EGuyFQUPTL27DR3+tfayNykDclsVDNN8+l7nt56j8HojP74P5OMHtn+6HX5+mtF +FfZMTy0mWguCsMOkZvjAskm6s4U5gEC8pYEoC0ZRbfUdyYsxZe/nrXIFguVlVPCB +XnfB/0iG9t+VH5cUVj1LP9skXTW4kXfhQmljUuo+EVBNR6n2nfTnpoC65WeAgHV4 +V+s9mJsUv2x72KtKYypqEVT0gaJ1WIN9N1s= +-----END CERTIFICATE----- diff --git a/test/change_streams/change-streams-errors.json b/test/change_streams/change-streams-errors.json deleted file mode 100644 index 00f51eb47e..0000000000 --- a/test/change_streams/change-streams-errors.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "single" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [], - "expectations": [], - "result": { - "error": { - "code": 40573 - } - } - }, - { - "description": "Change Stream should error when an invalid aggregation stage is passed in", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$unsupported": "foo" - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$unsupported": "foo" - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "error": { - "code": 40324 - } - } - }, - { - "description": "Change Stream should error when _id is projected out", - "minServerVersion": "4.1.11", - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [ - { - "$project": { - "_id": 0 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "result": { - "error": { - "code": 280, - "errorLabels": [ - "NonResumableChangeStreamError" - ] - } - } - } - ] -} diff --git 
a/test/change_streams/change-streams.json b/test/change_streams/change-streams.json deleted file mode 100644 index 4aeb2c7f70..0000000000 --- a/test/change_streams/change-streams.json +++ /dev/null @@ -1,795 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "The server returns change stream responses in the specified server response format", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Change Stream should allow valid aggregate pipeline stages", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$match": { - "fullDocument.z": 3 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - 
"arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$match": { - "fullDocument.z": { - "$numberInt": "3" - } - } - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", - "minServerVersion": "3.8.0", - "target": "database", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", - "minServerVersion": "3.8.0", - "target": "client", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "command_name": "aggregate", - "database_name": "admin" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests-2", - "coll": "test" - }, - "fullDocument": { - "y": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": 
"change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Test insert, update, replace, and delete event types", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "updateOne", - "arguments": { - "filter": { - "x": 1 - }, - "update": { - "$set": { - "x": 2 - } - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "replaceOne", - "arguments": { - "filter": { - "x": 2 - }, - "replacement": { - "x": 3 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "deleteOne", - "arguments": { - "filter": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "update", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "updateDescription": { - "updatedFields": { - "x": { - "$numberInt": "2" - } - } - } - }, - { - "operationType": "replace", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - }, - { - "operationType": "delete", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - } - ] - } - }, - { - "description": "Test rename and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "rename", - "arguments": { - "to": "test2" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "rename", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "to": { - "db": "change-stream-tests", - "coll": "test2" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test drop and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "drop" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "drop", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test consecutive 
resume", - "minServerVersion": "4.1.7", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": { - "batchSize": 1 - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "getMore" - ], - "closeConnection": true - } - }, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": { - "batchSize": 1 - }, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - } - ] - } - } - ] -} diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json new file mode 100644 index 0000000000..55b4ae3fbc --- /dev/null +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -0,0 +1,82 @@ +{ + "description": "change-streams-clusterTime", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "clusterTime is present", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "ns": { + "db": "database0", + "coll": "collection0" + }, + "clusterTime": { + "$$exists": true + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json new file mode 100644 index 0000000000..91d8e66da2 --- /dev/null +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -0,0 +1,252 @@ +{ + "description": "disambiguatedPaths", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": 
"client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "6.1.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "disambiguatedPaths is not present when showExpandedEvents is false/unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "$$exists": false + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths is present on updateDescription when an ambiguous path is present", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.1": [ + "a", + "1" + ] + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths returns array indices as integers", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": [ + { + "1": 1 + } + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.0.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.0.1": [ + "a", + { + "$$type": 
"int" + }, + "1" + ] + } + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json new file mode 100644 index 0000000000..04fe8f04f3 --- /dev/null +++ b/test/change_streams/unified/change-streams-errors.json @@ -0,0 +1,246 @@ +{ + "description": "change-streams-errors", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "expectError": { + "errorCode": 40573 + } + } + ] + }, + { + "description": "Change Stream should error when an invalid aggregation stage is passed in", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$unsupported": "foo" + } + ] + }, + "expectError": { + "errorCode": 40324 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$unsupported": "foo" + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should error when _id is projected out", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280 + } + } + ] + }, + { + "description": "change stream errors on ElectionInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 216, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 216 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json new file mode 100644 index 0000000000..8beefb2bc8 --- /dev/null +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -0,0 +1,827 @@ +{ + "description": "change-streams-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "collMod", + "insert", + "update", + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + 
}, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": 
"update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + 
"name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json new file mode 100644 index 0000000000..b4953ec736 --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -0,0 +1,2348 @@ +{ + "description": "change-streams-resume-allowlist", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false 
+ } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after a network error", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostUnreachable", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": 
true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + 
"ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 
+ } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": 
"createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change 
stream resumes after NotPrimaryNoSecondaryOk", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + 
"$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + 
"aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after CursorNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 43, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json new file mode 100644 index 0000000000..f5f4505a9f --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -0,0 +1,2130 @@ +{ + "description": "change-streams-resume-errorlabels", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + 
"command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + 
"cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ 
+ { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "maxServerVersion": "6.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + 
"aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + 
"aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes if error contains ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream does not resume if error does not contain ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 6 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json new file mode 100644 index 0000000000..3eed2f534a --- /dev/null +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -0,0 +1,517 @@ +{ + "description": "change-streams-showExpandedEvents", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "shardedDb", + "client": "client0", + "databaseName": "shardedDb" + } + }, + { + "database": { + "id": "adminDb", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "shardedCollection", + "database": "shardedDb", + "collectionName": "shardedCollection" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "when provided, showExpandedEvents is sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when omitted, showExpandedEvents is not sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": { + "$$exists": false + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when showExpandedEvents is true, new fields on change stream events are handled appropriately", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "a": 1 + } + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "foo", + "dropTarget": true + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "collectionUUID": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": 
"createIndexes", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "operationDescription": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "foo" + }, + "operationDescription": { + "dropTarget": { + "$$exists": true + }, + "to": { + "db": "database0", + "coll": "foo" + } + } + } + } + ] + }, + { + "description": "when showExpandedEvents is true, createIndex events are reported", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "operationType": { + "$ne": "create" + } + } + } + ], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, dropIndexes events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropIndex", + "object": "collection0", + "arguments": { + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "dropIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events on views are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, modify events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_2" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + 
"command": { + "collMod": "collection0" + }, + "commandName": "collMod" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "modify" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, shardCollection events are reported", + "runOnRequirements": [ + { + "topologies": [ + "sharded-replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createChangeStream", + "object": "shardedCollection", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "adminDb", + "arguments": { + "command": { + "shardCollection": "shardedDb.shardedCollection", + "key": { + "_id": 1 + } + }, + "commandName": "shardCollection" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "shardCollection" + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json new file mode 100644 index 0000000000..c8b60ed4e2 --- /dev/null +++ b/test/change_streams/unified/change-streams.json @@ -0,0 +1,1795 @@ +{ + "description": "change-streams", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase1", + "client": "globalClient", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "globalCollection1", + "database": "globalDatabase1", + "collectionName": "collection1" + } + }, + { + "collection": { + "id": "globalDb1Collection0", + "database": "globalDatabase1", + "collectionName": "collection0" + } + }, + { + "collection": { + "id": "globalDb0Collection1", + "database": "globalDatabase0", + "collectionName": "collection1" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "Test array truncation", + "runOnRequirements": [ + { + "minServerVersion": "4.7" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1, + "array": [ + "foo", + { + "a": "bar" + }, + 1, + 2, + 3 + 
] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "array": [ + "foo", + { + "a": "bar" + } + ] + } + } + ] + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": {}, + "removedFields": [], + "truncatedArrays": [ + { + "field": "array", + "newSize": 2 + } + ] + } + } + } + ] + }, + { + "description": "Test with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with string comment", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "Test that comment is set on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "key": "value" + } + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "key": "value" + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test 
that comment is not set on getMore - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "to field is set in a rename change event", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + } + ] + }, + { + "description": "Test unknown operationType MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "addedInFutureMongoDBVersion", + "ns": 1 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "addedInFutureMongoDBVersion", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ] + }, + { + "description": "Test newField added in response MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": 1, + "ns": 1, + "newField": "newFieldValue" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "newFieldValue" + } + } + ] + }, + { + "description": "Test new structure in ns document MUST NOT err", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "5.2.99" + }, + { + "minServerVersion": "6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": 
[ + { + "$project": { + "operationType": "insert", + "ns.viewOn": "db.coll" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test modified structure in ns document MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns": { + "db": "$ns.db", + "coll": "$ns.coll", + "viewOn": "db.coll" + } + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0", + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test server error on projecting out _id", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280, + "errorCodeName": "ChangeStreamFatalError", + "errorLabelsContain": [ + "NonResumableChangeStreamError" + ] + } + } + ] + }, + { + "description": "Test projection in change stream returns expected fields", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "optype": "$operationType", + "ns": 1, + "newField": "value" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "optype": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "value" + } + } + ] + }, + { + "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } 
+ ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "The server returns change stream responses in the specified server response format", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ] + }, + { + "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should allow valid aggregate pipeline stages", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the 
specified database.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database1", + "coll": "collection0" + }, + "fullDocument": { + "y": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test insert, update, replace, and delete event types", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": 
[] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "updateOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 1 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 2 + }, + "replacement": { + "x": 3 + } + } + }, + { + "name": "deleteOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "x": 2 + }, + "removedFields": [], + "truncatedArrays": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "replace", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "delete", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test rename and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "globalCollection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test drop and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + 
"arguments": { + "collection": "collection0" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "drop", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 1 + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test wallTime field is set in a change event", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "wallTime": { + "$$exists": true + } + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/corpus/corpus-encrypted.json b/test/client-side-encryption/corpus/corpus-encrypted.json index 
998b058b0f..1b72aa8a39 100644 --- a/test/client-side-encryption/corpus/corpus-encrypted.json +++ b/test/client-side-encryption/corpus/corpus-encrypted.json @@ -4021,5 +4021,5495 @@ "subType": "06" } } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAB0S2kOZe54q6iZqeTLndkX+kehTKtb30jTP7FS+Zx+cxhFs626OrGY+jrH41cLfroCccacyNHUZFRinfqZPNOyw==", + "subType": "06" + } + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABYViH7PLjCIdmTibW9dGCJADwXx2dRSMYxEmulPu89clAoeLDa8pwJ7YxLFQCcTGmZRfmp58dDDAzV8tyyE8QMg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABeRahSj4pniBp0rLIEZE8MdeyiIKcYuTZiuGzGiXbFbntEPow88DFHIBSxbMGR7p/8jCpPL+GqBwFkPkafXbMzg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABdaa3vKtO4cAEUjYJfOPl1KbbgeWtphfUuJd6MxR9VReNSf1jc+kONwmkPVQs2WyZ1n+TSQMGRoBp1nHRttDdTg==", + "subType": "06" + } + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACeoztcDg9oZ7ixHinReWQTrAumpsfyb0E1s3BGOFHgBCi1tW79CEXfqN8riFRc1YeRTlN4k5ShgHaBWBlax+XoQ==", + "subType": "06" + } + } + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACov9cXQvDHeKOS5Gxcxa8vdAcTsTXDYgUucGzsCyh4TnTWKGQEVk3DHndUXX569TKCjq5QsC//oWEwweCn1nZ4g==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACKU5qTdMdO0buQ/37ZRANUAAafcsoNMOTxJsDOfkqUb+/kRgM1ePlwVvk4EJiAGhJ/4SEmEOpwv05TT3PxGur2Q==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACX/ODKGHUyAKxoJ/c/3lEDBTc+eP/VS8OHrLhYoP96McpnFSgYi5jfUwvrFYa715fkass4N0nAHE6TzoGTYyk6Q==", + "subType": "06" + } + } + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": 
"id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADWkZMsfCo4dOPMH1RXC7GkZFt1RCjJf0vaLDA09ih1Jl47SOetZELQ7B1TQjRQitktzrfD43jk8Fn4J5ZYZu1qQ==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADJFMymfstltZP1oAqj4bgbCk8uLGtCd12eLqvSq0ZO+JDvls7PAovwmoWwigHunP8BBXT8sLydK+jn1sHfnhrlw==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADCen+XrLYKg7gIVubVfdbQwuJ0mFHxhSUUyyBWj4RCeLeLUYXckboPGixXWB9XdwcOnInfF9u6qvktY67GtYASQ==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADnUyp/7eLmxxxOdsP+mNuJABK4PQoKFWDAY7lDrH6MYa03ryASOihPZWYZWXZLrbAf7cQQhElEkKqKwY8+NXgqg==", + "subType": "06" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEtk14WyoatZcNPlg3y/XJNsBt6neFJeQwR06B9rMGV58oIsmeE5zMtUOBYTgzlnwyKpqI/XVAg8s1VxvsrvGCyLVPwGVyDztwtMgVSW6QM3s=", + "subType": "06" + } + } + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAERTO63J4Nj1BpFlqVduA2IrAiGoV4jEOH3FnFgx7ZP7da/YBmLX/bc1EqdpC8v4faHxp74iU0xAB0yW4WgySDX7rriL5cw9sMpqgLRaBxGug=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": 
"explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEs09qQdNVwh+KFqKPREQkw0XFdRNHAvjYJzs5MDE9+QxvtKlmVKSK3wkxDdCrcH4r7ePV2nCy2h1IHYqaDnnt4s5dSawI2l88iTT+bBcCSrU=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEaQ/YL50up4YIMJuVJSiAP06IQ+YjdKLIfkN/prbOZMiXErcD1Vq1hwGhfGdpEsLVu8E7IhJb4wakVC/2dLZoRP95az6HqRRauNNZAIQMKfY=", + "subType": "06" + } + } + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFl/leuLAHf1p6aRKHdFyN9FM6MW2XzBemql2xQgqkwJ6YOQXW6Pu/aI1scXVOrvrSu3+wBvByjHu++1AqFgzZRQ==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAF4Nq/LwyufT/mx0LtFSkupNHTuyjbr4yUy1N5/37XhkpqZ1e4sWCHGNaTDEm5+cvdnbqZ/MMkBv855dc8N7vnGA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFv1Kbv54uXJ76Ih63vtmszQtzkXqDlv8LDCFO3sjzu70+tgRXOhLm3J8uZpwoiNkgM6oNLn0en7tnEekYB9++CA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFgcYC1n7cGGXpv0qf1Kb8t9y/6kbhscGt2QJkQpAiqadFPPYDU/wwaKdDz94NpAHMZizUbhf9tvZ3UXl1bozhDA==", + "subType": "06" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFMzMC3BLn/zWE9dxpcD8G0h4aifSY0zSHS9xTVJXgq21s2WU++Ov2UvHatVozmtZltsUN9JvSWqOBQRkFsrXvI7bc4lYfOoOmfpTHFcRDA/c=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFDlBN5hUTcjamOg/sgyeG0S52kphsjUgvlpuqHYz6VVdLtZ69cGHOVqqyml3x2rVqWUZJjd4ZodOhlwWq9p+i5IYNot2QaBvi8NZSaiThTc0=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFjvS2ozJuAL3rCvyBpraVtgL91OMdiskmgYnyfKlzd8EhYLd1cL4yxnTUjRXx+W+p8uN0/QZo+mynhcWnwcq83raY+I1HftSTx+S6rZ0qyDM=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFqUMd/I0yOdy5W4THvFc6yrgSzB6arkRs/06b0M9Ii+QtAY6vbz+/aJ0Iy3Jm8TahC1wOZVmTj5luQpr+PHZMCEAFadv+0K/Nsx6xVhAh9gg=", + "subType": "06" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + 
"allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAH3sYVJpCKi310YxndMwm5ltEbbiRO1RwZxxeEkzI8tptbNXC8t7RkrT8VSJZ43wbGYCiqH5RZy9v8pYwtUm4STw==", + "subType": "06" + } + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHD7agzVEc0JwesHHhkpGYIDAHQ+3Hc691kqic6YmVvK2N45fD5aRKftaZNs5OxSj3tNHSo7lQ+DVtPj8uSSpsVg==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHEgKgy2mpMLpfeEWqbvQOaRZAy+cEGXGon3e53/JoH6dZneEyyt4ZrcrK6uRqyUPWX0q104JbCYxfbtHtdzWgPQ==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHqSv6Nruw3TIi7y0FPRjSfnJmWSdv5XMhAtnHNkT8MVuHeM32ayo0yc8dTA1wlkRtAI5JrGxTfERCXYuCojvvXg==", + "subType": "06" + } + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIYVWPvzSmiCs9LwRlv/AoQWhaS5mzoKX4W26M5eg/gPjOZbEVYOV80pWMxCcZWRAyV/NDWDUmKtRQDMU9b8lCJw==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIsAB01Ugqtw4T9SkuJBQN1y/ewpRAyz0vjFPdKI+jmPMmaXpMlXDJU8ZbTKm/nh6sjJCFcY5oZJ83ylbp2gHc6w==", + "subType": "06" + } + } + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIr8/qFd564X1mqHEhB0y7bzGFdrHuw+Gk45nXla3VvGHzeIJy6j2Wdl0uziWslMmBvNp8WweW+jQ6E2Fu7SiojQ==", + "subType": "06" + } + } + }, + 
"azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIWsca5FAnS2zhHnmKmexvvXMTgsZZ7uAFHnjQassUcay6mvIWH4hOnGiRxt5Zm0wO4S6cZq+PZrmEH5/n9rJcJQ==", + "subType": "06" + } + } + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJwKo7XW5daIFlwY1mDAnJdHlcUgF+74oViL28hQGhde63pkPyyS6lPkYrc1gcCK5DL7PwsSX4Vb9SsNAG9860xw==", + "subType": "06" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJYZdWIqvqTztGKJkSASMEOjyrUFKnYql8fMIEzfEZWx2BYsIkxxOUUUCASg/Jsn09fTLVQ7yLD+LwycuI2uaXsw==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJuWzKqi3KV8GbGGnT7i9N4BACUuNjt5AgKsjWIfrWRXK1+jRQFq0bYlVWaliT9CNIygL2aTF0H4eHl55PAI84MQ==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJ5JTtTuP4zTnEbaVlS/W59SrZ08LOC4ZIl+h+H4RnfHUfBXDwUou+APolVaYko+VZMKecrikdPeewgzWaqazJ1g==", + "subType": "06" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": 
"explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALsMm3W2ogEiI6m0l8dS5Xhqnw+vMBvN1EesOTqAZOk4tQleX6fWARwUUnjFxbuejU7ISb50fc/Ul+ntL9z/2nHQ==", + "subType": "06" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALITQNQI0hfCeMTxH0Hce1Cf5tinQG+Bq8EolUACvxUUQcDqIXfFXn19tV/Qyj4lIdnnwh/18hiswgEpJRK7uLGw==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALw/1QI/bKeiGUrrtC+yXOTvxZ2mJjSelPPGOm1mge0ws8DsX0DPHmo6MjhnRO4u0c/LWiE3hwHG2rYjAFlFXZ5A==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAL6Sl58UfFCHCZzWIB4r19/ZjeSRAoWeTFCFedKiwyR8/xnL+8jzXK/9+vTIspP6j35lFapr+f4iBNB9WjdpYNKA==", + "subType": "06" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMaAd1v/XCYM2Kzi/f4utR6aHOFORmzZ17EepEjkn5IeKshktUpPWjI/dBwSunn5Qxx2zI3nm06c3SDvp6tw8qb7u4qXjLQYhlsQ0bHvvm+vE=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM6VNjkN9bMIzfC7AX0ZhOEXPpyPE0nzYq3c5TNHrgeGWdZDR9GVdbO9t55zQrQJJ2Mmevh8c0WaAUV+YODv7ty6TDBsPbaKWWqMzu/v9RXHo=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": 
true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM66tywuMhwdyUjxfl7EOdKHNCLeIPnct3PgKrAKlOQFjiNQUIA2ShVy0qYpJcvvFsuQ5e8Bjr0IqeBc8mC7n4euRSM1UXpLqI5XHgXMMaYpI=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMtPQEbZ4gWoSYjVZLd5X6j0XxutWY1Ecrys2ErKRgZaxP0uGe8uw0cnr2Z5PYylaYmsSicLwD1PwWY42PKmaGBDraHmdfqDOPvrNxhBrfU/E=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANWXPb5z3a0S7F26vkmBF3fV+oXYUj15OEtnSlXlUrc+gbhbPDxSvCPnTBEy5sNu4ndkvEZZxYgZInkF2q4rhlfQ==", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANN4mcwLz/J4eOUknhVsy6kdF1ThDP8cx6dNpOwJWAiyPHEsn+i6JmMTlfQMBrUp9HB/u3R+jLO5yz4XgLUKE8Tw==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANJ+t5Z8hSQaoNzszzkWndAo4A0avDf9bKFa7euznz8ZYInnl9RUVqWMyxjSuIotAvTyYSJzxh+w2hKCgVf+MjEA==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANRLOQFpmkEg/KdWMmaurkNtUhy45rgtoipc9kQz6olgDWiMim81XC0AW5cOvjbHXL3w7Du28Kwdsp4j0PTTXHUQ==", + "subType": "06" + } + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": 
"det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAORMcgtQSU+/2Qlq57neRrVuAFSeSwkqdo+z1fh6IKjyEzhCy+u5bTzSzTopyKJQTCUZA2mSpRezWkM87oiGfhMFkBRVreMcE62eH+BLlgUaM=", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOIKlAw/A3nwHn0tO2cYtJx0azB8MGmXtt+bRptzn8yHlUSpMpYaiU0ssBBiLkmMLAITYebLqDk3NHESyP7PvbSfX1E2XVn2Nf694ZqPWMec8=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAO8SXW76AEr/6D6zyP1RYwmwdVM2AINaXZn3Ipy+fynWTUV6XIPIRR7xMTttNo2zlh7fgXDZ28PmjooGlQzn0q0JVQmXPCIPM3aqAmMcgyuqg=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOtoJWm2Ucre0foHIiOutsX1WIyub7t3Lby3/F8zRXn+l6ixlTjAPgWFwpRnYg96Lt2ACDDQ9CO51ejr9qk0b8LDBwG3qU5Cuibsp7vo1VsdI=", + "subType": "06" + } + } + }, + "azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPCw9NnvJyuTYIgZxr1w1UiG85PGZ4rO62DWWDF98HwVM/Y6u7hNdNjkaWjYFsPMl38ioHw/pS8GFR62QmH2RAw/BV0wI7pNy2evANr3i3gKg=", + "subType": "06" + } + } + 
}, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPXQzqnQ2UWkIYof8/OfadNMa7iVKAbOaiu7YGm8iVrx+W6uxKLPFugVqHtQ29hYXXf33xr8rqGNxDlAe7/x1OeYEif71f7LUkmKF9WxJV9Ko=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAP0nxlppgPyjLx0eBempbOlL21G6KbABSrE6+YuNDcsjJjxCQuLR9+aoAwa+yCDEC7GZ1E3oP489edKUuNpE4Ts26jy4aRegu4DmyECUeBwAg=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPO89afu9Sb+cK9wwM1cO1DPjvu5UNyObjjTScy1hy9PzllJGfj7b84f0Ah74jPYsMPwI0Eslu/IYF3+5jmquq5Qp/VUQESlxqRqRK0xIeMfs=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQUyy4uWmWdzypsK81q9egREg4s80X3L2hzxJzC+fL08Xzy1z9grpPPCfJrluUVKMMGmmZR8gJPJ70igN3unJbzg==", + "subType": "06" + } + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQr4gyoHKpGsSJo8CMsYSJk/KilFMJhsDCmxrha7yfNW1uR5sjyZj4B4s6uTXGw76x7aR/AvecDlY3QFJb8L1mjg==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQ0zgXYPV1MuEFksmDpVDoWkoZQelm3+rYrMiT64KYywO//75799W8TbR3a7O6Q/ErjKQOin2OCp8EWwZqTDdz5w==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQG+qz00yizREbP3tla1elMiwf8TKLbUU2XWUP+E0vey/wvbjTTIzqwUlz/b9St77CHJhavypP3hMrngXR9GapbQ==", + "subType": "06" + } + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARwcXYtx+A7g/zGkjGdkyVxZGCO9Nzj3D70NIpl2TeH2j9qYGP4DenwL1xSgrL2Ez+X58d2BvNhKrjA9y2w1Z8kA==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARQ0Pjx3l92Aqhn2e1hot2M9rQ6aLPE2Iw8AVhm5AD8FWywWih12Fn2p9+kiE33yKPOCyrTWQHKPtB4yYhqnJgGg==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARvFMlIzh2IjpHkTJ8buqTOqBA0+CxVDsZacUhSHVMgJLN+0DJsJy8OfkmKMu9Lk5hULY00Udoja87x+79mYfmeQ==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAR+2SCd7V5ukAkh7CYpNPIatzTL8osNoA4Mb5jjjbos8eMamImw0fbH8YA+Rdm4CgGdQQ9VDX7MtMWlArkj0Jpew==", + "subType": "06" + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASSSgX7k8iw0xFe0AiIzOu0e0P7Ujyfsk/Cdl0fR5X8V3QLVER+1Qa47Qpb8iWL2VLBSh+55HvIEtvhWn8SwXaog==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AgGVERAAAAAAAAAAAAAAAAASUhKr5K7ulGTeFbhIvJ2DDE10gRAFn5+2zqnsIFSY8lYV2PBYcENdeNBXZs6kyIAYhJdQyuOChVCerTI5jmQWDw==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASHxawpjTHdXYRWQSZ7Qi7gFC+o4dW2mPH8s5nQkPFY/EubcJbdAZ5HFp66NfPaDJ/NSH6Vy+TkpX3683RC+bjSQ==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASVaMAv6UjuBOUZMJ9qz+58TQWmgaMpS9xrJziJY80ml9aRlDTtRubP7U40CgbDvrtY1QgHbkF/di1XDCB6iXMMg==", + "subType": "06" + } + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATJ6LZgPu9F+rPtYsMuvwOx62+g1dAk858BUtE9FjC/300DnbDiolhkHNcyoFs07NYUNgLthW2rISb/ejmsDCt/oqnf8zWYf9vrJEfHaS/Ocw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATX8eD6qFYWKwIGvXtQG79fXKuPW9hkIV0OwrmNNIqRltw6gPHl+/1X8Q6rgmjCxqvhB05AxTj7xz64gP+ILkPQY8e8VGuCOvOdwDo2IPwy18=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATBjQ9E5wDdTS/iI1XDqGmDBC5aLbPB4nSyrjRLfv1zEoPRjmcHlQmMRJA0mori2VQv6EBFNHeczFCenJaSAkuh77czeXM2vH3T6qwEIDs4dw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATtkjbhdve7MNuLaTm6qvaewuVUxeC1DMz1fd4RC4jeiBFMd5uZUVJTiOIerwQ6P5G5lkMlezKDWgKl2FUvZH6c7V3JknhsaWcV5iLWGUL6Zc=", + "subType": "06" + } + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + 
"azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABFoHQxnh1XSC0k1B01uFFg7rE9sZVBn4PXo26JX8gx9tuxu+4l9Avb23H9BfOzuWiEc43iw87K/W2y0VfKp5CCg==", + "subType": "06" + } + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABRkZkEtQEFB/r268cNfYRQbN4u5Cxjl9Uh+8wq9TFWLQH2E/9wj2vTLlxQ2cQsM7Qd+XxR5idjfBf9CKAfvUa/A==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABDSUZ+0BbDDEZxCXA+J2T6Js8Uor2dfXSf7s/hpLrg6dxcW2chpht9XLiLOXG5w83TzCAI5pF8cQgBpBpYjR8RQ==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABCYxugs7L+4S+1rr0VILSbtBm79JPTLuzluQAv0+8hbu5Z6zReOL6Ta1vQH1oA+pSPGYA4euye3zNl1X6ZewbPw==", + "subType": "06" + } + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_double_det_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + 
"allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACx3wSslJEiD80YLTH0n4Bbs4yWVPQl15AU8pZMLLQePqEtI+BJy3t2bqNP1098jS0CGSf+LQmQvXhJn1aNFeMTw==", + "subType": "06" + } + } + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAC5BTe5KP5UxSIk6dJlkz8aaZ/9fg44XPWHafiiL/48lcv3AWbu2gcBo1EDuc1sJQu6XMrtDCRQ7PCHsL7sEQMGQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACyJN55OcyXXJ71x8VphTaIuIg6kQtGgVKPhWx0LSdYc6JOjB6LTdA7SEWiSlSWWFZE26UmKcPbkbLDAYf4IVrzQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACoa0d9gqfPP5s3+GoruwzxoQFgli8SmjpTVRLAOcFxqGdfrwSbpYffSw/OR45sZPxXCL6T2MtUvZsl7ukv0jBnw==", + "subType": "06" + } + } + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADy+8fkyeNYdIK001YogXfKc25zRXS1VGIFVWR6jRfrexy9C8LBBfX3iDwGNPbP2pkC3Tq16OoziQB6iNGf7s7yg==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADixoDdvm57gH8ooOaKI57WyZD5uaPmuYgmrgAFuV8I+oaalqYctnNSYlzQKCMQX/mIcTxvW3oOWY7+IzAz7npvw==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADvq0OAoijgHaVMhsoNMdfWFLyISDo6Y13sYM0CoBXS/oXJNIJJvhgKPbFSV/h4IgiDLy4qNYOTJQvpqt094RPgQ==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": 
"object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADuTZF7/uqGjFbjzBYspPkxGWvvVAEN/ib8bfPOQrEobtTWuU+ju9H3TlT9DMuFy7RdUZnPB0D3HkM8+zky5xeBw==", + "subType": "06" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAE085kJIBX6S93D94bcRjkOegEKsksi2R1cxoVDoOpSdHh3S6bZAOh50W405wvnOKf3KTP9SICDUehQKQZSC026Y5dwVQ2GiM7PtpSedthKJs=", + "subType": "06" + } + } + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEk/FAXsaqyVr6I+MY5L0axeLhskcEfLZeB8whLMKbjLDLa8Iep+IdrFVSfKo03Zr/7Ah8Js01aT6+Vt4EDMJK0mGKZJOjsrAf3b6RS+Mzebg=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEDY7J9JGiurctYr7ytakNjcryVm42fkubcVpQpUYEkpK/G9NLGjrJuFgNW5ZVjYiPKEBbDB7vEtJqGux0BU++hrvVHNJ3wUT2mbDE18NE4KE=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAErFFlw8W9J2y+751RnYLw0TSK9ThD6sP3i4zPbZtiuhc90RFoJhScvqM9i4sDKuYePZZRLBxdX4EZhZClOmswCGDLCIWsQlSvCwgDcIsRR/w=", + "subType": "06" + } + } + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF0R5BNkQKfm6wx/tob8nVGDEYV/pvy9UeCqc9gFNuB5d9KxCkgyxryV65rbB90OriqvWFO2jcxzchRYgRI3fQ+A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF4wcT8XGc3xNdKYDX5/cbUwPDdnkIXlWWCCYeSXSk2oWPxMZnPsVQ44nXKJJsKitoE3r/hL1sSG5239WzCWyx9g==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + 
"$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF07OFs5mlx0AB6QBanaybLuhuFbG+19KxSqHlSgELcz6TQKI6equX97OZdaWSWf2SSeiYm5E6+Y3lgA5l4KxC2A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFZ74Q7JMm7y2i3wRmjIRKefhmdnrhP1NXJgploi+44eQ2eRraZsW7peGPYyIfsXEbhgV5+aLmiYgvemBywfdogQ==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFmDO47RTVXzm8D4hfhLICILrQJg3yOwG3HYfCdz7yaanPow2Y6bMxvXxk+kDS29aS8pJKDqJQQoMGc1ZFD3yYKsLQHRi/8rW6TNDQd4sCQ00=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFpiu9Q3LTuPmgdWBqo5Kw0vGF9xU1rMyE4xwR8GccZ7ZMrUcR4AnZnAP7ah5Oz8e7qonNYX4d09obesYSLlIjyK7J7qg+GWiEURgbvmOngaA=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFHRy8dveGuMng9WMmadIp39jD7iEfl3bEjKmzyNoAc0wIcSJZo9kdGbNEwZ4p+A1gz273fmAt/AJwAxwvqdlanLWBr4wiSKz1Mu9VaBcTlyY=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFiqO+sKodqXuVox0zTbKuY4Ng0QE1If2hDLWXljAEZdYABPk20UJyL/CHR49WP2Cwvi4evJCf8sEfKpR+ugPiyxWzP3iVe6qqTzP93BBjqoc=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", 
+ "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAH8Kt6coc8bPI4QIwS1tIdk6pPA05xlZvrOyAQgvoqaozMtWzG15OunQLDdS3yJ5WRiV7kO6CIKqRrvL2RykB5sw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHU5Yzmz2mbgNQrGSvglgVuv14nQWzipBkZUVSO4eYZ7wLrj/9t0fnizsu7Isgg5oA9fV0Snh/A9pDnHZWoccXUw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHsdq5/FLqbjMDiNzf+6k9yxUtFVjS/xSqErqaboOl21934pAzgkOzBGodpKKFuK0Ta4f3h21XS+84wlIYPMlTtw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHokIdXxNQ/NBMdMAVNxyVuz/J5pMMdtfxxJxr7PbsRJ3FoD2QNjTgE1Wsz0G4o09Wv9UWD+/mIqPVlLgx1sRtPw==", + "subType": "06" + } + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", 
+ "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIf7vUYS5XFrEU4g03lzj9dk8a2MkaQdlH8nE/507D2Gm5XKQLi2jCENZ9UaQm3MQtVr4Uqrgz2GZiQHt9mXcG3w==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIdOC4Tx/TaVLRtOL/Qh8RUFIzHFB6nSegZoITwZeDethd8V3+R+aIAgzfN3pvmZzagHyVCm2nbNYJNdjOJhuDrg==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIzB14mX2vaZdiW9kGc+wYEgTCXA0FB5AVEyuERD00+K7U5Otlc6ZUwMtb9nGUu+M7PnnfxiDFHCrUWrTkAZzSUw==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIhRLg79ACCMfeERBgG1wirirrZXZzbK11RxHkAbf14Fji2L3sdMBdLBU5I028+rmtDdC7khcNMt11V6XGKpAjnA==", + "subType": "06" + } + } + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJL+mjI8xBmSahOOi3XkGRGxjhGNdJb445KZtRAaUdCV0vMKbrefuiDHJDPCYo7mLYNhRSIhQfs63IFYMrlKP26A==", + "subType": "06" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJbeyqO5FRmqvPYyOb0tdKtK6JOg8QKbCl37/iFeEm7N0T0Pjb8Io4U0ndB3O6fjokc3kDQrZcQkV+OFWIMuKFjw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJVz3rSYIcoYtM0tZ8pB2Ytgh8RvYPeZvW7aUVJfZkZlIhfUHOHEf5kHqxzt8E1l2n3lmK/7ZVCFUuCCmr8cZyWw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJAiQqNyUcpuDEpFt7skp2NSHFCux2XObrIIFgXReYgtWoapL/n4zksJXl89PGavzNPBZbzgEa8uwwAe+S+Y6TLg==", + "subType": "06" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALiebb3hWwJRqlgVEhLYKKvo6cnlU7BFnZnvlZ8GuIr11fUvcnS9Tg2m7vPmfL7WVyuNrXlR48x28Es49YuaxuIg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALouDFNLVgBXqhJvBRj9DKacuD1AQ2NAVDW93P9NpZDFFwGOFxmKUcklbPj8KkHqvma8ovVUBTLLUDR+tKFRvC2Q==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALtdcT9+3R1he4eniT+1opqs/YtujFlqzBXssv+hCKhJQVY/IXde32nNpQ1WTgUc7jfIJl/v9HvuA9cDHPtDWWTg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALAwlRAlj4Zpn+wu9eOcs5CsNgrkVwrgmu1tc4wyQp0Lt+3UcplYsXQMrMPcTx3yB0JcI4Kh65n/DrAaA+G/a6iw==", + "subType": "06" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + 
}, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMG8P+Y2YNIgknxE0/yPDCHASBvCU1IJwsEyaJPuOjn03enxEN7z/wbjVMN0lGUptDP3SVL+OIZtQ35VRP84MtnbdhcfZWqMhLjzrCjmtHUEg=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMKCLFUN6ApB5fSVEWazRddhKTEwgqI/mxfe0BBxht69pZQYhTjhOJP0YcIrtr+RCeHOa4FIJgQod1CFOellIzO5YH5CuV4wPxCAlOdbJcBK8=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAM7ULEA6uKKv4Pu4Sa3aAt7dXtEwfQC98aJoLBapHT+xXtn5GWPynOZQNtV3lGaYExQjiGdYbzOcav3SVy/sYTe3ktgkQnuZfe0tk0zyvKIMM=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMoMveHO1MadAKuT498xiKWWBUKRbH7k7P2YETDg/BufVw0swos07rk6WJa1vqyF61QEmACjy4pmlK/5P0VfKJBAIvif51YqHPQkobJVS3nVA=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANqBD0ITMn4BaFnDp7BX7vXbRBkFwmjQRVUeBbwsQtv5WVlJMAd/2+w7tyH8Wc44x0/9U/DA5GVhpTrtdDyPBI3w==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AhgjwAAAAAAAAAAAAAAAAAANtA0q4mbkAaKX4x1xk0/094Mln0wnh2bYnI6s6dh+l2WLDH7A9JMZxCl6kc4uOsEfbOvjP/PLIYtdMGs14EjM5A==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANfrW3pmeiFdBFt5tJS6Auq9Wo/J4r/vMRiueLWxig5S1zYuf9kFPJMK/nN9HqQPIcBIJIC2i/uEPgeepaNXACCw==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANL7UZNzpwfwhRn/HflWIE9CSxGYNwLSo9d86HsOJ42rrZKq6HQqm/hiEAg0lyqCxVIVFxYEc2BUWSaq4/+SSyZw==", + "subType": "06" + } + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOsGdnr6EKcBdOAvYrP0o1pWbhhJbYsqfVwwwS1zq6ZkBayOss2J3TuYwBGXhJFlq3iIiWLdxGQ883XIvuAECnqUNuvpK2rOLwtDg8xJLiH24=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOpfa6CUSnJBvnWdd7pSZ2pXAbYm68Yka6xa/fuyhVx/Tc926/JpqmOmQtXqbOj8dZra0rQ3/yxHySwgD7s9Qr+xvyL7LvAguGkGmEV5H4Xz4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAO085iqYGFdtjiFWHcNqE0HuKMNHmk49DVh+pX8Pb4p3ehB57JL1nRqaXqHPqhFenxSEInT/te9HQRr+ADcHADvUGsScfm/n85v85nq6X+5y4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOiidb+2TsbAb2wc7MtDzb/UYsjgVNSw410Sz9pm+Uy7aZROE5SURKXdLjrCH2ZM2a+XCAl3o9yAoNgmAjEvYVxjmyzLK00EVjT42MBOrdA+k=", + "subType": "06" + } + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPUsQHeXWhdmyfQ2Sq1ev1HMuMhBTc/FZFKO9tMMcI9qzjr+z4IdCOFCcx24/T/6NCsDpMiOGNnCdaBCCNRwNM0CTIkpHNLO+RSZORDgAsm9Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPRZawtuu0gErebyFqiQw0LxniWhdeujGzaqfAXriGo/2fU7PalzTlWQa8wsv0y7Q/i1K4JbQwCEFpJWLppmtZshCGbVWjpPljB2BH4NNrLPE=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP0qkQjuKmKIqdrsrR9djxt+1jFlEL7K9bP1oz7QWuY38dZJOoGwa6G1bP4wDzjsucJLCEgU2IY+t7BHraBFXvR/Aar8ID5eXcvJ7iOPIyqUw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP6L41iuBWGLg3hQZuhXp4MupTQvIT07+/+CRY292sC02mehk5BkuSOEVrehlvyvBJFKia4Bqd/UWvY8PnUPLqFKTLnokONWbAuh36y3gjStw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQ+6oRKWMSvC+3UGrHSyGeVlR9bFnZtFTmYlUoGn04k6ndtCl8rsmBVUV6dMMYd7znnZtTSIGPI8q6jwf/NJjdIw==", + "subType": "06" + } + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AhgjwAAAAAAAAAAAAAAAAAAQnz5jAbrrdutTPFA4m3MvlVJr3bpurTKY5xjwO5k8DZpeWTJzr+kVEJjG6M8/RgC/0UFNgBBrDbDhYa8PZHRijw==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQfRFoxUgjrv8up/eZ/fLlr/z++d/jFm30nYvKqsnQT7vkmmujJWc8yAtthR9OI6W5biBgAkounqRHhvatLZC6gA==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQY/ePk59RY6vLejx9a5ITwkT9000KAubVSqMoQwv7lNXO+GKZfZoLHG6k1MA/IxTvl1Zbz1Tw1bTctmj0HPEGNA==", + "subType": "06" + } + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLnk1LpJIriKr6iiY1yBDGnfkRaHNwWcQyL+mORtYC4+AQ6oMv0qpGrJxS2QCbYY1tGmAISqZHCIExCG+TIv4bw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARaqYXh9AVZI6gvRZrBwbprE5P3K5Qf4PIK1ca+mLRNOof0EExyAhtku7mYXusLeq0ww/tV6Zt1cA36KsT8a0Nog==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLXzBjkCN8BpfXDIrb94kuZCD07Uo/DMBfMIWQtAb1++tTheUoY2ClQz33Luh4g8NXwuMJ7h8ufE70N2+b1yrUg==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARe44QH9ZvTAuHsWhEMoue8eHod+cJpBm+Kl/Xtw7NI/6UTOOHC5Kkg20EvX3+GwXdAGk0bUSCFiTZb/yPox1OlA==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + 
}, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASuGZs48eEyVBJ9vvM6cvRySfuR0WM4kL7lx52rSGXBKtkZywyP5rJwNtRn9WTBMDqc1O/4jUgYXpqHx39SLhUPA==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAS/62F71oKTX1GlvOP89uNhXpIyLZ5OdnuLeM/hvL5HWyOudSb06cG3+xnPg3QgppAYFK5X2PGgrEcrA87AykLPg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASSgx+p4YzTvjZ+GCZCFHEKHNXJUSloPnLRHE4iJ515Epb8Tox7h8/aIAkB3ulnDS9BiT5UKdye2TWf8OBEwkXzg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAStqszyEfltpgd3aYeoyqaJX27OX861o06VhNX/N2fdSfKx0NQq/hWlWTkX6hK3hjCijiTtHmhFQR6QLkHD/6THw==", + "subType": "06" + } + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATg4U3nbHBX/Az3ie2yurEIJO6cFryQWKiCpBbx1z0NF7RXd7kFC1XzaY6zcBjfl2AfRO8FFmgjTmFXb6gTRSSF0iAZJZTslfe3n6YFtwSKDI=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": 
"altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATdSSyp0ewboV5zI3T3TV/FOrdx0UQbFHhqcH+yqpotoWPSw5dxE+BEoihYLeaPKuVU/rUIY4TUv05Egj7Ovg62Kpk3cPscxsGtE/T2Ppbt6o=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATl7k20T22pf5Y9knVwIDyOIlbHyZBJqyi3Mai8APEZIYjpSKDKs8QNAH69CIjupyge8Izw4Cuch0bRrvMbp6YFfrUgk1JIQ4iLKkqqzHpBTY=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATF7YLkhkuLhXdxrQk2fJTs128tRNYHeodkqw7ha/TxW3Czr5gE272gnkdzfNoS7uu9XwOr1yjrC6y/8gHALAWn77WvGrAlBktLQbIIinsuds=", + "subType": "06" + } + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAB1hL/nPkpQtqxQUANbIJr30PQ98vPvaoy4JWUoElOL+cCnrSra3o7W+12dydy0rCS2EKrVm7Fw0C8L9nf1hpWjw==", + "subType": "06" + } + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAABxlcphy2SxXlkRBvO1Z3nNUqchmeOhIhkdYBbbW7CwYeLVRDciXFsZN73Nb9Bm+W4IpUNpo6mqFEtfjevIjtFyg==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABx5AfRSiblFc1DGwxRIaUSP2kaM76ryzPUKL9KnEgnX1kjIlFz5B15uMht2cxdrntHFe1qZZk8V9PxTBpWZhJ8Q==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABXUC9v9HPrmU9tINzFmr2sQM9f7GHDus+y5T4pWX28PRtfnTysN/ANCfB9RosoR/wuKsbznwwD2JfSzOvlKo3PQ==", + "subType": "06" + } + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACGHmqW1qbfqVlfB0x0CkXCk9smhs3yXsxJ/8eypSgbDQqVLSW2nf5bbHpnoCHHNtQ7I7ZBXzPzDLH2GgMJpopeQ==", + "subType": "06" + } + } + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAC9BJTD1pEMbslAjbJYt7yx/jzKkcZF3axu96+NYwp8afUCjXG5TOUZzODOwkbJuWgr7DBxa2GkZTvaAEk86h+Ow==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACQlG28ECy8KHXC7GEPdC8+raBo2RMJwl5pofcPaTGkPUEbkreguMd1mYctNb90vXxby1nNeJY4o5zJJCMiNhNXg==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACbWuK+3nzeKSNVjmgHb0Ii7rA+CsAd+gYubPiMiHXZwE/o6i9FYWN+t/VK3p4K0CwIi6q3cycrMb2IgcvM27Q7Q==", + "subType": "06" + } + } + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADh2nGqaAUwHDRVjqYpj8JAPH7scmiHp1Z9SGBZQ6Fapxm+zWDdTBHyitM9U69BctJ5DaaafyqFOj5yr6sJ+ebJQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAD1YhOKyNle4y0Qbeio1HlCULLeTCALCLgKSITd50bilD+oDyqQawixJAwphcdjhLdFzbFwst5RWqpsiWMPHx4hQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADveILoWFgX7AhUWCv8UL52TUa75qHuoNadnTQydJlqd6PVmtRKj+8vS7VwxNWPaH4wB1Tk7emMyFEbZpvvzjxqQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADB/LN9V/4SROJn+ESHRLM7wwcUltQUx3+LbbYXjPDXiiV14HK76Iyy6ZxJ+M5qC9bRj3afhTKuWLBblB8WwksOg==", + "subType": "06" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEasWXQam8XtOkSO0nEttMCQ0iZ4V8DDmhMKyQDFDsiNHyF2h98Ya/xFv4ZSlbpGWXPBvBATEGgov/PDg2vhVi53y4Pk33RHfY60hABuksp3o=", + "subType": "06" + } + } + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEj3A1DYSEHm/3SlEmusA+pewxRPUoZ2NAjs60ioEBlCw9n6yiiB+X8d/w40TKsjZcOSfh05NC0z3gnpqQvrNolkxkvi9dmFiZeiiv5vBZUPI=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEqeJW+L6lP0bn5QcD0FMI0C8vv2n5kV7SKgqKi1o5mxaxmp3Cjlspf7yumfSiQ5js6G9yJVAvHuxlqv14UFyR9RgXS0PIA8WzsAqkL0sJSw0=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEnPlPwy0B1VKuNum1GzkZwQjZia5jNYL5bf/k+PbfhnToTRWGxx8+E3R7XXp6YT/rFkjPlzU8ww9+iZNo2oqNpYuHdrIC8ybhO6HZAlvcERo=", + "subType": "06" + } + } + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, 
+ "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFliNDZ6DmjoVcYQBCKDI9njpBsDELg+TD6XLF7xbZnMaJCCHLHr7w3x2/xFfrFSN44CtGAKOniYPCMAspaxHqOA==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF/P8LPmHKGgG0l5/Xi7jdkwfxpGPxoY0417suCvN6zjM3JNdufytzkektrm9CbBb1SnZCGYF9c0FCMzFG+tN/dg==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFWI0N4RbnYdEiFrzNpbRN9p+bSLm8Lthiu4K3/CvBg6GQpLMVQFhjW01Bud0lxpT2ohRnOK+ASUhiFcUU/t/lWQ==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFQZvAtpY4cjEr1rJWVoUGaZKmzocSJ0muHose7Tk5kRDczjFa4Jcu4hN7JLM9qz2z4g+WJC3KQTdW4ZBXStke/Q==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFn7rhdO8tYq77uVxcqd9Qjz84Yg7JnJMYf0ULTMTh1vJHacckkhXw+8fIMMiAKwuOVwGkMAtu5RBvrFqdfxryCg8RLTxu1YYVthufiClEIS0=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFwwXQx9dKyoyHq7GBMmHzYe9ysoJK/f/ZWzA6nErau9MtX1gqi7VRsYqkamb47/zVbsLZwPMmdgNyPxEh3kqbV2D61t5RG2A3VeqhO1pTF8c=", + "subType": 
"06" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFALeGeinJ8DE+WZniLdCIW2gfJUj445Ukp9PvRLgBXLGedl8mIXlLF2eu3BA9vP6s5y9w6peQjhn+oEofrsUVYD2duyzeIRMKgNiNchjf6TU=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF06Fx8CO3OSKE3fGri0VwK0e22YiG9LH2QkDTsRdFbT2lBm+bDD9FrEY8vKWS5RljMuysaxjBOzZ98d2LEs6k8LMOm83Nz/RESe4ZbbcfdQ0=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHZFzE908RuO5deEt3t2QQdT12ybwqbm8D+sMJrdKt2Wp4kVPsw4ocAGGsRYN6VXe46P5fmyG5HqVWn0hkflZnQg==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAH3dPKyCCStvOtVGzlgIS33fsl8OAwQblt9i21pOVuLiliY1Tup9EtkSic88+nNEtXnq9gRknRzLthXv/k1ql+7Q==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + 
"allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHcEjxVfHDSfLzFxAuK/rs/Pn/XV7jLkgKXZYeY0PNlRi1MHojN2AvQqI3J2rOvAjuYfikGcpvGPp/goqUbV9HYw==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHX65sNHnRYpx3VbWPCdQyFe7u0Y5ItabLEduqDeVsPk/iK4X3GjCSHQfw1yPi+CA+/veVpgdonwws6RiYV4ZZ5Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIw/xgJlKEvErmVtue3X3RFsOI2sttAbxnzh1INc9GUQ2vok1VwYt9k88RxMPiOwMAZG7P1MlAdx7zt865onPKOw==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIn8IuzlNHbpTgXOd1wEp364zJOBxj2Zf7a9B5osUV1sDY0G1OVpEnuDvZeUsdiUSyRjTTxzyuD/KZlKZ3+qrnrA==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAI3Nz9PdjUYQRGfTtvYSR8EQuUKFL0wdlEdfSCTBmMBhBPuuF9KxqCgy+ldVu1DRRgg3346DOKEEtE9BJPPInJ6Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIEGjqoerIZBk8Rw+YTO7jFKWzagDS8mEpD+9Wm1Q0r0ZHUmV0dQZcIqRV4oUk8U8uHUn0N3t2qGLr+rhUs4GH/g==", + "subType": "06" + } + } + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAAJgr0v4xetUXjlLcPcyKv/rzjtWOKp9CZJcm23Noglu5RR/rXJS0qKI+W9MmJ64TMf27KvaJ0UXwfTRrvOC1plCg==", + "subType": "06" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJoeysAaiPsVK+JL1P1vD/9xF92m5kKidUdn6yklPlSKN4VVEBTymDetTLujULs1u1TlrS71jVLxo3xEwpG/KQvg==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJVwu4+Su0DktpnZvzTBHYpWbWTq5gho/SLijrcIrFJcvq4YrjjPCXv+odCl95tkH+J1RlJdQ5Cr0umEIazLa6GA==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJWTYpjbDkIf82QXHMGrvd0SqhP8cBIakfYJf5aNcNrs86vxRhiG3KwETWPeOOlPZ6n1WjE2bOLB+DJTAxmJvahA==", + "subType": "06" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALi8avMfpxSlDsSTqdxO8O2B1M79gOElyUIdXySQo7mvgHlf4oHQ7r94lL9dnsA2t/jmUmBKoGypaUQUSQE+9x+A==", + "subType": "06" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALfHerZ/KolaBrb5qi3SpeNVW+i/nh5mkcdtQg5f1pHePr68KryHucM/XDAzbMqrPlag2/41STGYdJqzYO7Mbppg==", + 
"subType": "06" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALOhKDVAN5cuDyB1EuRFWgKKt0wGJ63E5pPY8Tq2TXMNgCxUUc5O+TE+Ux4ls/uMyOBA3gPzND0CZKiru0i7ACUQ==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALK3Hg8xX9gX+d3vKh7aosRP9CS2CIFeG9sapZv3OAPv1eWjY62Cp/G16kJ0BQt33RYD+DzD3gWupfUSyNZR0gng==", + "subType": "06" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMoGkfmmUWTI+0aW7jVyCJ5Dgru1SCXBUmJSRzDL0D57pNruQ+79tVVcI6Uz5j87DhZFxShHbPjj583vLOOBNM3WGzZCpqH3serhHTWvXK+NM=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMwu1WaRhhv43xgxLNxuenbND9M6mxGtCs9o4J5+yfL95XNB9Daie3RcLlyngz0pncBie6IqjhTycXsxTLQ94Jdg6m5GD5cU541LYKvhbv5f4=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAM+CIoCAisUwhhJtWQLolxQGQWafniwYyvaJQHmJC94Uwbf1gPfhMR42v2VtrmIVP0J0BaP/xf0cco2/qWRdKGZpgkK2CK6M972NtnZ/2x03A=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMjbeE9+EaJYjGfeAuxsV8teOdsW8bfnlkvji/tE11Zq89UMGx+oUsZzeLjUgVZ5nxsZKCZjEAq+DPnwFVC+MgqNeqWL7fRChODFlPGH2ZC+8=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + 
"subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANuzlkWs/c8xArrAxPgYuCeShjj1zCfIMHOTPohspcyNofo9iY3P5MlhEOprZDiS8dBFg6EB7fZDzDdczx6VCN2A==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANwJ72y7UqCBJh1NwVRiE3vU1ex7FMv/X5YWCMuO9MHPMo4g1V5eaO4KfOr+K8+9NtkflgMpeDkvwP92rfR5ud5Q==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANj5q+888itRnLsw9PNGsBLhgqpvem5IJBOE2292r6zwjVueoEK/2I2PesRnn0esnkwdia1ADoMkcLUegwcFRkWQ==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANnvbnmApys7OIe8LGTsZKDG1F1G1SI/rfZVmF6q1fq5U7feYPp1ejb2t2S2+v7LfcOHytsQWGcYuWCDcl+vosvQ==", + "subType": "06" + } + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOe+vXpJSkmBM3WkxZrn4ea9/C6iNyMXWUzkQIzIYlnbkyu8od8nfOdhobUhoFxcKnvdaxN1s5NhJ1FA97RN/upGYN+AI/7cTCElmFSpdSvkI=", 
+ "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPpCgK6Hc/M2elOJkwIU9J7PZa+h1chody2yvfDu/UlB6T5sxnEZ6aEY/ISNLhJlhsRzuApSgFOmnrcG6Eg9VnSKin2yK0ll+VFxQEDHAcSA=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOVoHX9GaOn71L5D9TpZmmxkx/asr0FHCLG5ZgLLA04yIhZHsDjt2DiVGGO/Mf4KwvoBn7Cf08qMhW7rQh2LgvvSLBO3zbw5l+MZ/bSn+Jylo=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPobmcO/I4QObtCUEmGWpSCJ6tlYyhbO59q78LZBucSNl7DSkf/13tOJ9t+WKXACcMKVMmfPoFsgHbVj1nKWULBT07n1OWWDTZkuMD6C2+Fc=", + "subType": "06" + } + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPW2VMMm+EvsYpVtJQhsxgxgvV35kr9nxqKxP2qqIOAOQ58R/1oyYScFkNwB/tw0A1/zdvhoo+ERa7c0tjLIojFrosXhX2N/8Z4VnbZruz0Nk=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPjPq9BQR4EwG/CD+RthOJY04m99LCl/shY6HnaU/QL627kN1dbBAG5vs+MXfa+glg8waVTNgB94vm3j72FMV1ZOKvbl4faWF1Rl2EOpOlR9U=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtqebrCAidKzBMvp3B5/vBeetqeCoMKS+vo+hLAYooXrnBunWxwRHpr45XYUvroG3aqOMkLtVZSgw8sO6Y/3z1viO2G0sGQW1ZMoW0/PX5Uw=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + 
"allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtkJwXKlq8Fx1f1+9HFofM4uKi6lHQRFRyiOyUFJYxxZY1LR/2WXXTqWz3MWtrcJFCB+QSVOb1N/ieC7AZUboPgIuPJISM3Hu5VU2x/Isbdc=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQ50kE7Tby9od2OsmIGZhp9k/mj4vy/YdnmF6YsSPxihbjV1vXGMraI/nGCr+0H1riwzq3m4sCT7aPw2VgiuwKMA==", + "subType": "06" + } + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQkNL14OSMX/bJbsLtB/UumRoat6QOY7fvwZxRrkXTS3VJVHigthI1cUX7Is/uUsY8oHOfk/ZuHklQkifmfdcklQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQtN2gNVU9Itoj+vgcK/4jEB5baSUH+Qz2WqTY7m0XaA3bPWGFCiWY4Sdw+qovednrSSSbC+azWi1QYclFRraldQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQk6uBqwXXFF9zEM4bc124goI3pBy2Jdi8Cd0ycKkjXrPG7GVCUm2UMbO+zEzYODeVo35N11g2yMXcv9RVgjWtNA==", + "subType": "06" + } + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAR2Cu3o2e/u5o69MndeZPJU5ngVA1G2MNYn00t+up/GlmaUC1ni1CVl0ZR0EVZ0gCDUrfxwPISPib8y23tNjbsog==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + 
"allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARgi8stgSQwqnN4Ws2ZBILOREsjreZcS1MBerL7dbGLVfzW99tqECglhGokkrE0aY69L0xMgcAUIaFRN4GanQAPg==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARPxEEI8L5Q3Jybu88BLdf31T3uYEUbijgSlKlkTt141RYrlE8nxtiYU5/5H9GXBis0Qq1s2C+MauD2h/cNijTCA==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARh/QaU1dnGbii4LtXCpT5o6vencc8E2fzarjJFbSEd0ixW/UV1ppZdvD729d0umkaIwIEVA4q+XVvHfl/ckKPFg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASVv+ClXkh9spIaXWJYRV/o8UZjG+WWWrNpIjZ9LQn2bXakrKJ3REvdkrzGuxASmBhBYTplEyvxVCJwXuWRAGGYw==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASeAz/dK+Gc4/jx3W07B2rNFvQ0LoyCllFRvRVGu1Xf1NByc4cRZLOMzlr99syz/fifF6WY30bOi5Pani9QtFuGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASP1HD9uoDlwTldaznKxW71JUQcLsa4/cUWzeTnelQwdpohCbZsM8fBZBqgwwTWnjpYY/LBUipC6yhwLKfUXBoBQ==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASnGPH77bS/ETB1hn+VTvsBrxEvIHA6EAb8Z2SEz6BHt7SVeI+I7DLERvRVpV5kNJFcKgXDrvRmD+Et0rhSmk9sw==", + "subType": "06" + } + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATu/BbCc5Ti9SBlMR2B8zj3Q1yQ16Uob+10LWaT5QKS192IcnBGy4wmmNkIsTys060xUby9KKQF80dVPnjYfqJwEXCe/pVaPQZftE0DolKv78=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATpq6/dtxq2ZUZHrK10aB0YjjPalEaXYcyAyRZjfXWAYCLZdT9sIybjX3Axjxisim+VSHx0QU7oXkKUfcbLgHyjUXj8g9059FHxKFkUsNv4Z8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATS++9KcfM7uiShZYxRpFPrBJquKv7dyvFRTjnxs6aaaPo0fiqpv6bco/cMLsldEVpWDEA/Tc2HtSXYPp4UJsMfASyBjoxCloL5SaRWyD9Ye8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATREcETS5KoAGyj/P45owPrdFfy5ng8Z1ND+F+780lLddOyPeDnIsa7yg6uvhTZ65mHfGLvKcFocclYenq/AX1dY4xdjLRg/AfT088A27ORUA=", + "subType": "06" + } + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + 
"kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-azure.json b/test/client-side-encryption/corpus/corpus-key-azure.json new file mode 100644 index 0000000000..31a564edb8 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-azure.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["azure"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-gcp.json b/test/client-side-encryption/corpus/corpus-key-gcp.json new file mode 100644 index 0000000000..79d6999b08 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-gcp.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["gcp"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-kmip.json b/test/client-side-encryption/corpus/corpus-key-kmip.json new file mode 100644 index 0000000000..7c7069700e --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-kmip.json @@ -0,0 +1,32 @@ +{ + "_id": { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": ["kmip"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-schema.json b/test/client-side-encryption/corpus/corpus-schema.json index e4838d8aae..e74bc914f5 100644 --- a/test/client-side-encryption/corpus/corpus-schema.json +++ b/test/client-side-encryption/corpus/corpus-schema.json @@ -34,11 +34,19 @@ }, "aws_double_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_double_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_rand_auto_id": { "bsonType": "object", @@ -73,11 +81,19 @@ }, "aws_string_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_det_auto_id": { "bsonType": "object", @@ -100,11 +116,19 @@ }, "aws_string_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_object_rand_auto_id": { "bsonType": "object", @@ -139,11 +163,19 @@ }, "aws_object_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_object_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_array_rand_auto_id": { "bsonType": "object", @@ -178,11 +210,19 @@ }, "aws_array_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_array_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_rand_auto_id": { "bsonType": "object", @@ -217,11 +257,19 @@ }, "aws_binData=00_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_det_auto_id": { "bsonType": "object", @@ -244,11 +292,19 @@ }, "aws_binData=00_det_explicit_id": { "bsonType": "object", - "properties": { "value": { 
"bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_rand_auto_id": { "bsonType": "object", @@ -283,11 +339,19 @@ }, "aws_binData=04_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_det_auto_id": { "bsonType": "object", @@ -310,11 +374,19 @@ }, "aws_binData=04_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_rand_auto_id": { "bsonType": "object", @@ -349,11 +421,19 @@ }, "aws_objectId_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_det_auto_id": { "bsonType": "object", @@ -376,11 +456,19 @@ }, "aws_objectId_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_bool_rand_auto_id": { "bsonType": "object", @@ -415,11 +503,19 @@ }, "aws_bool_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_bool_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_rand_auto_id": { "bsonType": "object", @@ -454,11 +550,19 @@ }, "aws_date_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_det_auto_id": { "bsonType": "object", @@ -481,11 +585,19 @@ }, "aws_date_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_rand_auto_id": { "bsonType": "object", @@ -520,11 +632,19 @@ }, "aws_regex_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" 
+ } + } }, "aws_regex_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_det_auto_id": { "bsonType": "object", @@ -547,11 +667,19 @@ }, "aws_regex_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_rand_auto_id": { "bsonType": "object", @@ -586,11 +714,19 @@ }, "aws_dbPointer_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_det_auto_id": { "bsonType": "object", @@ -613,11 +749,19 @@ }, "aws_dbPointer_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_rand_auto_id": { "bsonType": "object", @@ -652,11 +796,19 @@ }, "aws_javascript_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_det_auto_id": { "bsonType": "object", @@ -679,11 +831,19 @@ }, "aws_javascript_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_rand_auto_id": { "bsonType": "object", @@ -718,11 +878,19 @@ }, "aws_symbol_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_det_auto_id": { "bsonType": "object", @@ -745,11 +913,19 @@ }, "aws_symbol_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascriptWithScope_rand_auto_id": { "bsonType": "object", @@ -784,11 +960,19 @@ }, "aws_javascriptWithScope_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, 
"aws_javascriptWithScope_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_rand_auto_id": { "bsonType": "object", @@ -823,11 +1007,19 @@ }, "aws_int_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_det_auto_id": { "bsonType": "object", @@ -850,11 +1042,19 @@ }, "aws_int_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_rand_auto_id": { "bsonType": "object", @@ -889,11 +1089,19 @@ }, "aws_timestamp_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_det_auto_id": { "bsonType": "object", @@ -916,11 +1124,19 @@ }, "aws_timestamp_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_rand_auto_id": { "bsonType": "object", @@ -955,11 +1171,19 @@ }, "aws_long_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_det_auto_id": { "bsonType": "object", @@ -982,11 +1206,19 @@ }, "aws_long_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_decimal_rand_auto_id": { "bsonType": "object", @@ -1021,11 +1253,19 @@ }, "aws_decimal_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_decimal_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_double_rand_auto_id": { "bsonType": "object", @@ -1060,11 +1300,19 @@ }, "local_double_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_double_rand_explicit_altname": { "bsonType": "object", - "properties": { 
"value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_rand_auto_id": { "bsonType": "object", @@ -1099,11 +1347,19 @@ }, "local_string_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_det_auto_id": { "bsonType": "object", @@ -1126,11 +1382,19 @@ }, "local_string_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_object_rand_auto_id": { "bsonType": "object", @@ -1165,11 +1429,19 @@ }, "local_object_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_object_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_array_rand_auto_id": { "bsonType": "object", @@ -1204,11 +1476,19 @@ }, "local_array_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_array_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_rand_auto_id": { "bsonType": "object", @@ -1243,11 +1523,19 @@ }, "local_binData=00_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_det_auto_id": { "bsonType": "object", @@ -1270,11 +1558,19 @@ }, "local_binData=00_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_rand_auto_id": { "bsonType": "object", @@ -1309,11 +1605,19 @@ }, "local_binData=04_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_det_auto_id": { "bsonType": "object", @@ -1336,11 +1640,19 @@ }, "local_binData=04_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_det_explicit_altname": { "bsonType": "object", - 
"properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_rand_auto_id": { "bsonType": "object", @@ -1375,11 +1687,19 @@ }, "local_objectId_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_det_auto_id": { "bsonType": "object", @@ -1402,11 +1722,19 @@ }, "local_objectId_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_bool_rand_auto_id": { "bsonType": "object", @@ -1441,11 +1769,19 @@ }, "local_bool_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_bool_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_rand_auto_id": { "bsonType": "object", @@ -1480,11 +1816,19 @@ }, "local_date_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_det_auto_id": { "bsonType": "object", @@ -1507,11 +1851,19 @@ }, "local_date_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_rand_auto_id": { "bsonType": "object", @@ -1546,11 +1898,19 @@ }, "local_regex_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_det_auto_id": { "bsonType": "object", @@ -1573,11 +1933,19 @@ }, "local_regex_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_rand_auto_id": { "bsonType": "object", @@ -1612,11 +1980,19 @@ }, "local_dbPointer_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": 
"binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_det_auto_id": { "bsonType": "object", @@ -1639,11 +2015,19 @@ }, "local_dbPointer_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_rand_auto_id": { "bsonType": "object", @@ -1678,11 +2062,19 @@ }, "local_javascript_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_det_auto_id": { "bsonType": "object", @@ -1705,11 +2097,19 @@ }, "local_javascript_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_rand_auto_id": { "bsonType": "object", @@ -1744,11 +2144,19 @@ }, "local_symbol_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_det_auto_id": { "bsonType": "object", @@ -1771,11 +2179,19 @@ }, "local_symbol_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascriptWithScope_rand_auto_id": { "bsonType": "object", @@ -1810,11 +2226,19 @@ }, "local_javascriptWithScope_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascriptWithScope_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_rand_auto_id": { "bsonType": "object", @@ -1849,11 +2273,19 @@ }, "local_int_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_det_auto_id": { "bsonType": "object", @@ -1876,11 +2308,19 @@ }, "local_int_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_det_explicit_altname": { "bsonType": "object", - "properties": { 
"value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_rand_auto_id": { "bsonType": "object", @@ -1915,11 +2355,19 @@ }, "local_timestamp_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_det_auto_id": { "bsonType": "object", @@ -1942,11 +2390,19 @@ }, "local_timestamp_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_rand_auto_id": { "bsonType": "object", @@ -1981,11 +2437,19 @@ }, "local_long_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_det_auto_id": { "bsonType": "object", @@ -2008,11 +2472,19 @@ }, "local_long_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_decimal_rand_auto_id": { "bsonType": "object", @@ -2047,11 +2519,3817 @@ }, "local_decimal_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_decimal_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "azure_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": 
"/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "azure_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "azure_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" 
+ } + } + }, + "azure_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "azure_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_det_explicit_id": { + "bsonType": "object", + 
"properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "azure_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": 
"/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "gcp_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "gcp_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + 
"gcp_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": 
"binData" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "gcp_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + 
"bsonType": "binData" + } + } + }, + "gcp_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "gcp_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_explicit_id": { + "bsonType": "object", + "properties": { 
+ "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "kmip_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": 
"binData" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": 
"binData" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "kmip_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + 
"bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + 
"$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "kmip_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + 
"keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "kmip_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } } } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus.json b/test/client-side-encryption/corpus/corpus.json index cbf7a091a1..559711b347 100644 --- a/test/client-side-encryption/corpus/corpus.json +++ b/test/client-side-encryption/corpus/corpus.json @@ -2,6 +2,9 @@ "_id": "client_side_encryption_corpus", "altname_aws": "aws", "altname_local": "local", + "altname_azure": "azure", + "altname_gcp": "gcp", + "altname_kmip": "kmip", "aws_double_rand_auto_id": { "kms": "aws", "type": "double", @@ -9,7 +12,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_auto_altname": { "kms": "aws", @@ -18,7 +23,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_explicit_id": { "kms": "aws", @@ -27,7 +34,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_explicit_altname": { "kms": "aws", @@ -36,7 +45,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_det_explicit_id": { "kms": "aws", @@ -45,7 +56,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + 
"$numberDouble": "1.234" + } }, "aws_double_det_explicit_altname": { "kms": "aws", @@ -54,7 +67,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_string_rand_auto_id": { "kms": "aws", @@ -126,7 +141,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_auto_altname": { "kms": "aws", @@ -135,7 +154,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_explicit_id": { "kms": "aws", @@ -144,7 +167,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_explicit_altname": { "kms": "aws", @@ -153,7 +180,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_det_explicit_id": { "kms": "aws", @@ -162,7 +193,11 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_det_explicit_altname": { "kms": "aws", @@ -171,7 +206,11 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_array_rand_auto_id": { "kms": "aws", @@ -181,9 +220,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_auto_altname": { @@ -194,9 +239,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_explicit_id": { @@ -207,9 +258,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_explicit_altname": { @@ -220,9 +277,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_det_explicit_id": { @@ -233,9 +296,15 @@ "identifier": "id", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_det_explicit_altname": { @@ -246,9 +315,15 @@ "identifier": "altname", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_binData=00_rand_auto_id": { @@ -258,7 +333,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_auto_altname": { "kms": "aws", @@ -267,7 
+347,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_explicit_id": { "kms": "aws", @@ -276,7 +361,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_explicit_altname": { "kms": "aws", @@ -285,7 +375,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_auto_id": { "kms": "aws", @@ -294,7 +389,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_explicit_id": { "kms": "aws", @@ -303,7 +403,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_explicit_altname": { "kms": "aws", @@ -312,7 +417,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=04_rand_auto_id": { "kms": "aws", @@ -322,7 +432,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_auto_altname": { @@ -333,7 +446,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_explicit_id": { @@ -344,7 +460,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_explicit_altname": { @@ -355,7 +474,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_auto_id": { @@ -366,7 +488,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_explicit_id": { @@ -377,7 +502,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_explicit_altname": { @@ -388,7 +516,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_undefined_rand_explicit_id": { @@ -398,7 +529,9 @@ "method": "explicit", 
"identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_rand_explicit_altname": { "kms": "aws", @@ -407,7 +540,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_det_explicit_id": { "kms": "aws", @@ -416,7 +551,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_det_explicit_altname": { "kms": "aws", @@ -425,7 +562,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_objectId_rand_auto_id": { "kms": "aws", @@ -434,7 +573,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_auto_altname": { "kms": "aws", @@ -443,7 +584,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_explicit_id": { "kms": "aws", @@ -452,7 +595,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_explicit_altname": { "kms": "aws", @@ -461,7 +606,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_auto_id": { "kms": "aws", @@ -470,7 +617,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_explicit_id": { "kms": "aws", @@ -479,7 +628,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_explicit_altname": { "kms": "aws", @@ -488,7 +639,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_bool_rand_auto_id": { "kms": "aws", @@ -551,7 +704,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_auto_altname": { "kms": "aws", @@ -560,7 +717,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_explicit_id": { "kms": "aws", @@ -569,7 +730,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_explicit_altname": { "kms": "aws", @@ -578,7 +743,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_det_auto_id": { "kms": "aws", @@ -587,7 +756,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + 
"$numberLong": "12345" + } + } }, "aws_date_det_explicit_id": { "kms": "aws", @@ -596,7 +769,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_det_explicit_altname": { "kms": "aws", @@ -605,7 +782,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_null_rand_explicit_id": { "kms": "aws", @@ -650,7 +831,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_auto_altname": { "kms": "aws", @@ -659,7 +845,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_explicit_id": { "kms": "aws", @@ -668,7 +859,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_explicit_altname": { "kms": "aws", @@ -677,7 +873,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_auto_id": { "kms": "aws", @@ -686,7 +887,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_explicit_id": { "kms": "aws", @@ -695,7 +901,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_explicit_altname": { "kms": "aws", @@ -704,7 +915,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_dbPointer_rand_auto_id": { "kms": "aws", @@ -716,7 +932,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -730,7 +948,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -744,7 +964,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -758,7 +980,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -772,7 +996,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -786,7 +1012,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + 
"$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -800,7 +1028,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -811,7 +1041,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_auto_altname": { "kms": "aws", @@ -820,7 +1052,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_explicit_id": { "kms": "aws", @@ -829,7 +1063,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_explicit_altname": { "kms": "aws", @@ -838,7 +1074,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_auto_id": { "kms": "aws", @@ -847,7 +1085,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_explicit_id": { "kms": "aws", @@ -856,7 +1096,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_explicit_altname": { "kms": "aws", @@ -865,7 +1107,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_symbol_rand_auto_id": { "kms": "aws", @@ -874,7 +1118,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_auto_altname": { "kms": "aws", @@ -883,7 +1129,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_explicit_id": { "kms": "aws", @@ -892,7 +1140,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_explicit_altname": { "kms": "aws", @@ -901,7 +1151,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_auto_id": { "kms": "aws", @@ -910,7 +1162,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_explicit_id": { "kms": "aws", @@ -919,7 +1173,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_explicit_altname": { "kms": "aws", @@ -928,7 +1184,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_javascriptWithScope_rand_auto_id": { "kms": "aws", @@ -937,7 +1195,10 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_auto_altname": { "kms": "aws", @@ -946,7 +1207,10 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + 
"$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_explicit_id": { "kms": "aws", @@ -955,7 +1219,10 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_explicit_altname": { "kms": "aws", @@ -964,7 +1231,10 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_det_explicit_id": { "kms": "aws", @@ -973,7 +1243,10 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_det_explicit_altname": { "kms": "aws", @@ -982,7 +1255,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_int_rand_auto_id": { "kms": "aws", @@ -991,7 +1267,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_auto_altname": { "kms": "aws", @@ -1000,7 +1278,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_explicit_id": { "kms": "aws", @@ -1009,7 +1289,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_explicit_altname": { "kms": "aws", @@ -1018,7 +1300,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_auto_id": { "kms": "aws", @@ -1027,7 +1311,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_explicit_id": { "kms": "aws", @@ -1036,7 +1322,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_explicit_altname": { "kms": "aws", @@ -1045,7 +1333,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_timestamp_rand_auto_id": { "kms": "aws", @@ -1054,7 +1344,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_auto_altname": { "kms": "aws", @@ -1063,7 +1358,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_explicit_id": { "kms": "aws", @@ -1072,7 +1372,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_explicit_altname": { "kms": "aws", @@ -1081,7 +1386,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_auto_id": { "kms": "aws", @@ -1090,7 +1400,12 @@ "method": "auto", "identifier": "id", "allowed": true, - 
"value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_explicit_id": { "kms": "aws", @@ -1099,7 +1414,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_explicit_altname": { "kms": "aws", @@ -1108,7 +1428,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_long_rand_auto_id": { "kms": "aws", @@ -1117,7 +1442,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_auto_altname": { "kms": "aws", @@ -1126,7 +1453,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_explicit_id": { "kms": "aws", @@ -1135,7 +1464,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_explicit_altname": { "kms": "aws", @@ -1144,7 +1475,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_auto_id": { "kms": "aws", @@ -1153,7 +1486,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_explicit_id": { "kms": "aws", @@ -1162,7 +1497,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_explicit_altname": { "kms": "aws", @@ -1171,7 +1508,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_decimal_rand_auto_id": { "kms": "aws", @@ -1180,7 +1519,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_auto_altname": { "kms": "aws", @@ -1189,7 +1530,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_explicit_id": { "kms": "aws", @@ -1198,7 +1541,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_explicit_altname": { "kms": "aws", @@ -1207,7 +1552,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_det_explicit_id": { "kms": "aws", @@ -1216,7 +1563,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_det_explicit_altname": { "kms": "aws", @@ -1225,7 +1574,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_minKey_rand_explicit_id": { "kms": "aws", @@ -1234,7 +1585,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + 
"$minKey": 1 + } }, "aws_minKey_rand_explicit_altname": { "kms": "aws", @@ -1243,7 +1596,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_minKey_det_explicit_id": { "kms": "aws", @@ -1252,7 +1607,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_minKey_det_explicit_altname": { "kms": "aws", @@ -1261,7 +1618,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_maxKey_rand_explicit_id": { "kms": "aws", @@ -1270,7 +1629,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_rand_explicit_altname": { "kms": "aws", @@ -1279,7 +1640,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_det_explicit_id": { "kms": "aws", @@ -1288,7 +1651,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_det_explicit_altname": { "kms": "aws", @@ -1297,7 +1662,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_double_rand_auto_id": { "kms": "local", @@ -1306,7 +1673,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_auto_altname": { "kms": "local", @@ -1315,7 +1684,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_explicit_id": { "kms": "local", @@ -1324,7 +1695,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_explicit_altname": { "kms": "local", @@ -1333,7 +1706,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_det_explicit_id": { "kms": "local", @@ -1342,7 +1717,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_det_explicit_altname": { "kms": "local", @@ -1351,8 +1728,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDouble": "1.234" } - }, + "value": { + "$numberDouble": "1.234" + } + }, "local_string_rand_auto_id": { "kms": "local", "type": "string", @@ -1423,7 +1802,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_auto_altname": { "kms": "local", @@ -1432,7 +1815,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_explicit_id": { "kms": "local", @@ -1441,7 +1828,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_explicit_altname": { "kms": "local", @@ -1450,7 +1841,11 @@ "method": "explicit", "identifier": 
"altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_det_explicit_id": { "kms": "local", @@ -1459,7 +1854,11 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_det_explicit_altname": { "kms": "local", @@ -1468,7 +1867,11 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_array_rand_auto_id": { "kms": "local", @@ -1478,9 +1881,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_auto_altname": { @@ -1491,9 +1900,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_explicit_id": { @@ -1504,9 +1919,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_explicit_altname": { @@ -1517,9 +1938,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_det_explicit_id": { @@ -1530,9 +1957,15 @@ "identifier": "id", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_det_explicit_altname": { @@ -1543,9 +1976,15 @@ "identifier": "altname", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_binData=00_rand_auto_id": { @@ -1555,7 +1994,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_auto_altname": { "kms": "local", @@ -1564,7 +2008,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_explicit_id": { "kms": "local", @@ -1573,7 +2022,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_explicit_altname": { "kms": "local", @@ -1582,7 +2036,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_auto_id": { "kms": "local", @@ -1591,7 +2050,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", 
"subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_explicit_id": { "kms": "local", @@ -1600,7 +2064,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_explicit_altname": { "kms": "local", @@ -1609,7 +2078,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=04_rand_auto_id": { "kms": "local", @@ -1619,7 +2093,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_auto_altname": { @@ -1630,7 +2107,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_explicit_id": { @@ -1641,7 +2121,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_explicit_altname": { @@ -1652,7 +2135,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_auto_id": { @@ -1663,7 +2149,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_explicit_id": { @@ -1674,7 +2163,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_explicit_altname": { @@ -1685,7 +2177,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_undefined_rand_explicit_id": { @@ -1695,7 +2190,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_rand_explicit_altname": { "kms": "local", @@ -1704,7 +2201,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_det_explicit_id": { "kms": "local", @@ -1713,7 +2212,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_det_explicit_altname": { "kms": "local", @@ -1722,7 +2223,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_objectId_rand_auto_id": { "kms": "local", @@ -1731,7 +2234,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": 
"01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_auto_altname": { "kms": "local", @@ -1740,7 +2245,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_explicit_id": { "kms": "local", @@ -1749,7 +2256,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_explicit_altname": { "kms": "local", @@ -1758,7 +2267,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_auto_id": { "kms": "local", @@ -1767,7 +2278,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_explicit_id": { "kms": "local", @@ -1776,7 +2289,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_explicit_altname": { "kms": "local", @@ -1785,7 +2300,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_bool_rand_auto_id": { "kms": "local", @@ -1848,7 +2365,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_auto_altname": { "kms": "local", @@ -1857,7 +2378,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_explicit_id": { "kms": "local", @@ -1866,7 +2391,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_explicit_altname": { "kms": "local", @@ -1875,7 +2404,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_auto_id": { "kms": "local", @@ -1884,7 +2417,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_explicit_id": { "kms": "local", @@ -1893,7 +2430,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_explicit_altname": { "kms": "local", @@ -1902,7 +2443,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_null_rand_explicit_id": { "kms": "local", @@ -1947,7 +2492,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, 
"local_regex_rand_auto_altname": { "kms": "local", @@ -1956,7 +2506,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_rand_explicit_id": { "kms": "local", @@ -1965,7 +2520,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_rand_explicit_altname": { "kms": "local", @@ -1974,7 +2534,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_auto_id": { "kms": "local", @@ -1983,7 +2548,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_explicit_id": { "kms": "local", @@ -1992,7 +2562,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_explicit_altname": { "kms": "local", @@ -2001,7 +2576,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_dbPointer_rand_auto_id": { "kms": "local", @@ -2013,7 +2593,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2027,7 +2609,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2041,7 +2625,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2055,7 +2641,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2069,7 +2657,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2083,7 +2673,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2097,7 +2689,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2108,7 +2702,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_rand_auto_altname": { "kms": "local", @@ -2117,7 +2713,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_rand_explicit_id": { "kms": "local", @@ -2126,7 +2724,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" 
} + "value": { + "$code": "x=1" + } }, "local_javascript_rand_explicit_altname": { "kms": "local", @@ -2135,7 +2735,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_auto_id": { "kms": "local", @@ -2144,7 +2746,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_explicit_id": { "kms": "local", @@ -2153,7 +2757,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_explicit_altname": { "kms": "local", @@ -2162,7 +2768,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_symbol_rand_auto_id": { "kms": "local", @@ -2171,7 +2779,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_auto_altname": { "kms": "local", @@ -2180,7 +2790,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_explicit_id": { "kms": "local", @@ -2189,7 +2801,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_explicit_altname": { "kms": "local", @@ -2198,7 +2812,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_auto_id": { "kms": "local", @@ -2207,7 +2823,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_explicit_id": { "kms": "local", @@ -2216,7 +2834,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_explicit_altname": { "kms": "local", @@ -2225,7 +2845,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_javascriptWithScope_rand_auto_id": { "kms": "local", @@ -2234,7 +2856,10 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_auto_altname": { "kms": "local", @@ -2243,7 +2868,10 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_explicit_id": { "kms": "local", @@ -2252,7 +2880,10 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_explicit_altname": { "kms": "local", @@ -2261,7 +2892,10 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_det_explicit_id": { "kms": "local", @@ -2270,7 +2904,10 @@ "method": "explicit", "identifier": "id", "allowed": 
false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_det_explicit_altname": { "kms": "local", @@ -2279,7 +2916,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_int_rand_auto_id": { "kms": "local", @@ -2288,7 +2928,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_auto_altname": { "kms": "local", @@ -2297,7 +2939,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_explicit_id": { "kms": "local", @@ -2306,7 +2950,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_explicit_altname": { "kms": "local", @@ -2315,7 +2961,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_auto_id": { "kms": "local", @@ -2324,7 +2972,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_explicit_id": { "kms": "local", @@ -2333,7 +2983,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_explicit_altname": { "kms": "local", @@ -2342,7 +2994,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_timestamp_rand_auto_id": { "kms": "local", @@ -2351,7 +3005,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_auto_altname": { "kms": "local", @@ -2360,7 +3019,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_explicit_id": { "kms": "local", @@ -2369,7 +3033,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_explicit_altname": { "kms": "local", @@ -2378,7 +3047,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_auto_id": { "kms": "local", @@ -2387,7 +3061,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_explicit_id": { "kms": "local", @@ -2396,7 +3075,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_explicit_altname": { "kms": "local", @@ -2405,7 +3089,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + 
"i": 12345 + } + } }, "local_long_rand_auto_id": { "kms": "local", @@ -2414,7 +3103,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_auto_altname": { "kms": "local", @@ -2423,7 +3114,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_explicit_id": { "kms": "local", @@ -2432,7 +3125,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_explicit_altname": { "kms": "local", @@ -2441,7 +3136,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_auto_id": { "kms": "local", @@ -2450,7 +3147,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_explicit_id": { "kms": "local", @@ -2459,7 +3158,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_explicit_altname": { "kms": "local", @@ -2468,7 +3169,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_decimal_rand_auto_id": { "kms": "local", @@ -2477,7 +3180,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_auto_altname": { "kms": "local", @@ -2486,7 +3191,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_explicit_id": { "kms": "local", @@ -2495,7 +3202,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_explicit_altname": { "kms": "local", @@ -2504,7 +3213,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_det_explicit_id": { "kms": "local", @@ -2513,7 +3224,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_det_explicit_altname": { "kms": "local", @@ -2522,7 +3235,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_minKey_rand_explicit_id": { "kms": "local", @@ -2531,7 +3246,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_rand_explicit_altname": { "kms": "local", @@ -2540,7 +3257,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_det_explicit_id": { "kms": "local", @@ -2549,7 +3268,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_det_explicit_altname": { "kms": "local", @@ -2558,7 +3279,9 @@ "method": "explicit", "identifier": "altname", 
"allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_maxKey_rand_explicit_id": { "kms": "local", @@ -2567,7 +3290,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_rand_explicit_altname": { "kms": "local", @@ -2576,7 +3301,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_det_explicit_id": { "kms": "local", @@ -2585,7 +3312,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_det_explicit_altname": { "kms": "local", @@ -2594,7 +3323,4992 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": 
"id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + 
"kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": 
"explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + 
"$numberLong": "12345" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + 
"value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + 
"value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_altname": { + 
"kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + 
"$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_altname": { + 
"kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": 
"rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + 
"algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": 
true + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + 
"method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + 
"identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + 
"gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_altname": { + 
"kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + 
} + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + 
"$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } 
+ } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": 
"01234567890abcdef0123456" + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + 
"identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + 
"kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + 
"identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + 
"i": 12345 + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + 
"kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } }, "payload=0,algo=rand": { "kms": "local", @@ -2902,4 +8616,4 @@ "allowed": true, "value": "aaaaaaaaaaaaaaaa" } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/custom/azure-dek.json b/test/client-side-encryption/custom/azure-dek.json new file mode 100644 index 0000000000..e644c971c6 --- /dev/null +++ b/test/client-side-encryption/custom/azure-dek.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "azure_altname"] +} diff --git a/test/client-side-encryption/custom/azure-gcp-schema.json b/test/client-side-encryption/custom/azure-gcp-schema.json new file mode 100644 index 0000000000..441949f6d6 --- /dev/null +++ b/test/client-side-encryption/custom/azure-gcp-schema.json @@ -0,0 +1,31 @@ +{ + "db.coll": { + "bsonType": "object", + "properties": { + "secret_azure": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + }, + "secret_gcp": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + } +} diff --git a/test/client-side-encryption/custom/gcp-dek.json 
b/test/client-side-encryption/custom/gcp-dek.json new file mode 100644 index 0000000000..968c8b9176 --- /dev/null +++ b/test/client-side-encryption/custom/gcp-dek.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "gcp_altname"] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json new file mode 100644 index 0000000000..88abe5a604 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/keys/key1-document.json b/test/client-side-encryption/etc/data/keys/key1-document.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key1-id.json b/test/client-side-encryption/etc/data/keys/key1-id.json new file mode 100644 index 0000000000..7d18f52ebb --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-document.json b/test/client-side-encryption/etc/data/keys/key2-document.json new file mode 100644 index 0000000000..a654d980ba --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-id.json b/test/client-side-encryption/etc/data/keys/key2-id.json new file mode 100644 index 0000000000..6e9b87bbc2 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json new file mode 100644 index 0000000000..97a2b2d4e5 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -0,0 +1,33 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json new file mode 100644 index 0000000000..4d284475f4 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -0,0 +1,23 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json new file mode 100644 index 0000000000..53449182b2 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -0,0 +1,32 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json new file mode 100644 index 0000000000..b478a772d7 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -0,0 +1,23 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": 
"double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json new file mode 100644 index 0000000000..395a369680 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -0,0 +1,32 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json new file mode 100644 index 0000000000..61b7082dff --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -0,0 +1,29 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json new file mode 100644 index 0000000000..b18b84b6e8 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -0,0 +1,29 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/legacy/aggregate.json similarity index 93% rename from test/client-side-encryption/spec/aggregate.json rename to test/client-side-encryption/spec/legacy/aggregate.json index 6bc9242717..7de725b71d 100644 --- a/test/client-side-encryption/spec/aggregate.json +++ b/test/client-side-encryption/spec/legacy/aggregate.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -187,7 +175,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -273,18 +261,6 @@ "command_name": "aggregate" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -310,7 +286,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git 
a/test/client-side-encryption/spec/legacy/awsTemporary.json b/test/client-side-encryption/spec/legacy/awsTemporary.json new file mode 100644 index 0000000000..10eb85feee --- /dev/null +++ b/test/client-side-encryption/spec/legacy/awsTemporary.json @@ -0,0 +1,225 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporary": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + 
"encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Insert with invalid temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporaryNoSessionToken": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/azureKMS.json b/test/client-side-encryption/spec/legacy/azureKMS.json new file mode 100644 index 0000000000..afecf40b0a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/azureKMS.json @@ -0,0 +1,224 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "azure": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_azure": "string0" + } + } + } + ], + "expectations": [ + { + 
"command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/badQueries.json b/test/client-side-encryption/spec/legacy/badQueries.json similarity index 99% rename from test/client-side-encryption/spec/badQueries.json rename to test/client-side-encryption/spec/legacy/badQueries.json index 824a53c00b..4968307ba3 100644 --- a/test/client-side-encryption/spec/badQueries.json +++ b/test/client-side-encryption/spec/legacy/badQueries.json @@ -1318,7 +1318,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] @@ -1387,7 +1387,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] diff --git a/test/client-side-encryption/spec/badSchema.json b/test/client-side-encryption/spec/legacy/badSchema.json similarity index 100% rename from test/client-side-encryption/spec/badSchema.json rename to test/client-side-encryption/spec/legacy/badSchema.json diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/legacy/basic.json similarity index 92% rename from test/client-side-encryption/spec/basic.json rename to test/client-side-encryption/spec/legacy/basic.json index 371894e8ca..3ed066f530 100644 --- a/test/client-side-encryption/spec/basic.json +++ b/test/client-side-encryption/spec/legacy/basic.json @@ -144,18 +144,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -181,7 +169,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -283,18 +271,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -320,7 +296,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/legacy/bulk.json similarity index 96% rename from test/client-side-encryption/spec/bulk.json rename to test/client-side-encryption/spec/legacy/bulk.json index 
7a401d5e8e..1b62e5e8ab 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/legacy/bulk.json @@ -178,18 +178,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -215,7 +203,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/bypassAutoEncryption.json b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json similarity index 99% rename from test/client-side-encryption/spec/bypassAutoEncryption.json rename to test/client-side-encryption/spec/legacy/bypassAutoEncryption.json index 42f4473223..9d09cb3fa9 100644 --- a/test/client-side-encryption/spec/bypassAutoEncryption.json +++ b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json @@ -196,7 +196,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -369,7 +369,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json similarity index 93% rename from test/client-side-encryption/spec/bypassedCommand.json rename to test/client-side-encryption/spec/legacy/bypassedCommand.json index bd0b1c565d..18054a70cb 100644 --- a/test/client-side-encryption/spec/bypassedCommand.json +++ b/test/client-side-encryption/spec/legacy/bypassedCommand.json @@ -78,7 +78,7 @@ ] }, { - "description": "current op is not bypassed", + "description": "kill op is not bypassed", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -90,14 +90,15 @@ { "name": "runCommand", "object": "database", - "command_name": "currentOp", + "command_name": "killOp", "arguments": { "command": { - "currentOp": 1 + "killOp": 1, + "op": 1234 } }, "result": { - "errorContains": "command not supported for auto encryption: currentOp" + "errorContains": "command not supported for auto encryption: killOp" } } ] diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/legacy/count.json similarity index 94% rename from test/client-side-encryption/spec/count.json rename to test/client-side-encryption/spec/legacy/count.json index 9ac5104a09..9df8cd639e 100644 --- a/test/client-side-encryption/spec/count.json +++ b/test/client-side-encryption/spec/legacy/count.json @@ -149,18 +149,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -186,7 +174,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/legacy/countDocuments.json similarity index 95% rename from test/client-side-encryption/spec/countDocuments.json rename to test/client-side-encryption/spec/legacy/countDocuments.json index d4ae0aeb46..07ff97f264 100644 --- a/test/client-side-encryption/spec/countDocuments.json +++ b/test/client-side-encryption/spec/legacy/countDocuments.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 
1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -187,7 +175,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/create-and-createIndexes.json b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json new file mode 100644 index 0000000000..48638a97c8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json @@ -0,0 +1,115 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "unencryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection", + "index": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/legacy/delete.json similarity index 93% rename from test/client-side-encryption/spec/delete.json rename to test/client-side-encryption/spec/legacy/delete.json index bb9c061556..a6f4ffde91 100644 --- a/test/client-side-encryption/spec/delete.json +++ b/test/client-side-encryption/spec/legacy/delete.json @@ -151,18 +151,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -188,7 +176,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -276,18 +264,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { 
"command_started_event": { "command": { @@ -313,7 +289,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/legacy/distinct.json similarity index 95% rename from test/client-side-encryption/spec/distinct.json rename to test/client-side-encryption/spec/legacy/distinct.json index c473030580..9786b07814 100644 --- a/test/client-side-encryption/spec/distinct.json +++ b/test/client-side-encryption/spec/legacy/distinct.json @@ -161,18 +161,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -198,7 +186,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/legacy/explain.json similarity index 94% rename from test/client-side-encryption/spec/explain.json rename to test/client-side-encryption/spec/legacy/explain.json index 6872cedf2b..8ca3b48d37 100644 --- a/test/client-side-encryption/spec/explain.json +++ b/test/client-side-encryption/spec/legacy/explain.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.1.10" + "minServerVersion": "7.0.0" } ], "database_name": "default", @@ -155,18 +155,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -192,7 +180,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/legacy/find.json similarity index 94% rename from test/client-side-encryption/spec/find.json rename to test/client-side-encryption/spec/legacy/find.json index 93cef311c0..1feddab0e3 100644 --- a/test/client-side-encryption/spec/find.json +++ b/test/client-side-encryption/spec/legacy/find.json @@ -160,18 +160,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -197,7 +185,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -302,18 +290,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -339,7 +315,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/legacy/findOneAndDelete.json similarity index 94% rename from test/client-side-encryption/spec/findOneAndDelete.json rename to test/client-side-encryption/spec/legacy/findOneAndDelete.json index 2d9f963f23..e418a4581b 100644 --- a/test/client-side-encryption/spec/findOneAndDelete.json +++ b/test/client-side-encryption/spec/legacy/findOneAndDelete.json @@ -148,18 +148,6 @@ "command_name": "listCollections" } }, - { - 
"command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -185,7 +173,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/legacy/findOneAndReplace.json similarity index 94% rename from test/client-side-encryption/spec/findOneAndReplace.json rename to test/client-side-encryption/spec/legacy/findOneAndReplace.json index 1512fb9552..78baca8432 100644 --- a/test/client-side-encryption/spec/findOneAndReplace.json +++ b/test/client-side-encryption/spec/legacy/findOneAndReplace.json @@ -147,18 +147,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -184,7 +172,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json similarity index 94% rename from test/client-side-encryption/spec/findOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/findOneAndUpdate.json index a5b41f8455..1d85851151 100644 --- a/test/client-side-encryption/spec/findOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json @@ -149,18 +149,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -186,7 +174,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..dcc3983ae0 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,262 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": 
"1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "bypassQueryAnalysis": true + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + 
"subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json new file mode 100644 index 0000000000..e47c689bf0 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -0,0 +1,231 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + 
"compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..d5b04b3ea5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,62 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
+ } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..cc8bd17145 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -0,0 +1,1759 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "state collections and index are created", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + 
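The CreateCollection tests assert the v2 collection bootstrap: creating a Queryable Encryption collection also creates the `enxcol_.<name>.esc` and `enxcol_.<name>.ecoc` state collections (no `.ecc`, which was dropped in the v2 protocol) plus the `__safeContent___1` index. Roughly, in PyMongo terms; the key ID below is the spec's all-zero UUID, and the empty `aws` provider is declared but never contacted:

    from bson.binary import Binary
    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    key_id = Binary(b"\x00" * 16, 4)  # "AAAAAAAAAAAAAAAAAAAAAA==", subtype 04
    fields_map = {
        "default.encryptedCollection": {
            "fields": [
                {"path": "firstName", "bsonType": "string", "keyId": key_id}
            ]
        }
    }
    opts = AutoEncryptionOpts(
        {"aws": {}},
        "keyvault.datakeys",
        encrypted_fields_map=fields_map,
    )
    client = MongoClient(auto_encryption_opts=opts)
    db = client["default"]

    # Consults encrypted_fields_map and issues create commands for the two
    # state collections and the data collection, then createIndexes for
    # __safeContent__, matching the expectations above.
    db.create_collection("encryptedCollection")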
] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "default state collection names are applied", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "drop removes all state collections", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + 
}, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": 
"enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "plaintextCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "plaintextCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "plaintextCollection" + }, + "command_name": "create", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": 
"drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + 
"command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + 
"name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": 
"encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": 
{ + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..905d3c9456 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -0,0 +1,150 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + 
"operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json new file mode 100644 index 0000000000..e4150eab8e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -0,0 +1,285 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + 
"keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..b579979e94 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,213 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + 
"subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..0a84d73650 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,301 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": {}, + "bsonType": "object" + }, + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + 
} + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..3e0905eadf --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,106 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..4606fbb930 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,561 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" 
+ } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": 
"enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + 
"encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..c7149d1f5c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,297 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } 
+ }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..008b0c959f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,249 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": 
"q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "Query with an unindexed field fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "result": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json new file mode 100644 index 0000000000..a072454112 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -0,0 +1,117 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": 
"BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..185691d61c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -0,0 +1,88 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "encrypted_fields": { + "fields": [] + }, + "tests": [ + { + "description": "insert with no encryption succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": "bar" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json new file mode 100644 index 0000000000..dea821bd1e --- /dev/null +++ 
b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json @@ -0,0 +1,509 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": 
"1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": 
{ + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json new file mode 100644 index 0000000000..9e4f525877 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json @@ -0,0 +1,1840 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } 
+ }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + 
"$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", 
+ "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + 
"$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json new file mode 100644 index 0000000000..7f4094f50c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json @@ -0,0 +1,437 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": 
{ + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..5ec0601603 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json @@ -0,0 +1,515 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + 
{ + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json new file mode 100644 index 0000000000..efce1511c0 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json @@ -0,0 +1,500 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": 
"00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json new file mode 100644 index 0000000000..7f9fadcda4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json @@ -0,0 +1,517 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json new file mode 100644 index 0000000000..fb129392b1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json @@ -0,0 +1,1903 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json new file mode 100644 index 0000000000..5120aecb7a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json @@ -0,0 +1,1156 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + 
} + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + 
"$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + 
"name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json new file mode 100644 index 0000000000..de81159b43 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json @@ -0,0 +1,1111 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..36cf91c88c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1907 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + 
"$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json new file mode 100644 index 0000000000..6b5a642aa8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json @@ -0,0 +1,1894 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json new file mode 100644 index 0000000000..8cfb7b525b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json @@ -0,0 +1,1911 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, 
+ { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..801beefe18 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json @@ -0,0 +1,585 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + 
"ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..b8a6953611 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json @@ -0,0 +1,1648 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + 
"$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 
0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "name": 
"find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, 
+ "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..1abb59bfd1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json @@ -0,0 +1,471 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..8d763431fa --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,589 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + 
"key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3A
H0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..5407fba18b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json @@ -0,0 +1,572 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": 
"enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json new file mode 100644 index 0000000000..e5d1a4e059 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json @@ -0,0 +1,589 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json new file mode 100644 index 0000000000..d8c9cacdcc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json @@ -0,0 +1,1133 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json new file mode 100644 index 0000000000..65594bcb11 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json @@ -0,0 +1,1158 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } 
+ }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + 
"_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + 
} + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": 
"Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json new file mode 100644 index 0000000000..392e722f1f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json @@ -0,0 +1,727 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + 
"status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..bbcfb321f5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json @@ -0,0 +1,1137 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + 
{ + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCO
vaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qo
nQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0
qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": 
{ + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json new file mode 100644 index 0000000000..9f2c7c9911 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json @@ -0,0 +1,1124 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": 
"default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay
4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7
ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8
XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", 
+ "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json new file mode 100644 index 0000000000..ce03576f88 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json @@ -0,0 +1,1141 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": 
"update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQ
u0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NN
eAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn7
3Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..b121c72f14 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json @@ -0,0 +1,581 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + 
"precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA
6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } 
+ }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..6b42ecfe82 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json @@ -0,0 +1,1648 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } 
+ } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + 
} + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": 
"0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json new file mode 100644 index 0000000000..a5c397d0be --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json @@ -0,0 +1,469 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..b6df9463e8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,585 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAAD
uBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..1cea25545b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json @@ -0,0 +1,572 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": 
"enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json new file mode 100644 index 0000000000..7703c9057d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json @@ -0,0 +1,589 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json new file mode 100644 index 0000000000..9c2536264d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json @@ -0,0 +1,485 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + 
} + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json new file mode 100644 index 0000000000..58ccf3efc8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json @@ -0,0 +1,1642 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find 
with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } 
+ }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json new file mode 100644 index 0000000000..b20b2750bb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json @@ -0,0 +1,415 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": 
"int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": 
"rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..f9c189ace9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json @@ -0,0 +1,489 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json new file mode 100644 index 0000000000..874d4760c8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json @@ -0,0 +1,476 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + 
"encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json new file mode 100644 index 0000000000..c2b62b4d1c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json @@ -0,0 +1,493 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json new file mode 100644 index 0000000000..afc0f97be1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json @@ -0,0 +1,485 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" 
+ } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json new file mode 100644 index 0000000000..cda941de8a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json @@ -0,0 +1,1642 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + 
"description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] 
+ } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json new file mode 100644 index 0000000000..ad344e21b4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json @@ -0,0 +1,415 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": 
"default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": 
"enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..d447200468 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json @@ -0,0 +1,489 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json new file mode 100644 index 0000000000..4eb837f28b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json @@ -0,0 +1,476 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 
1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json new file mode 100644 index 0000000000..3ba7f17c14 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json @@ -0,0 +1,493 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json new file mode 100644 index 0000000000..e5e9ddc821 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json @@ -0,0 +1,160 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json new file mode 100644 index 0000000000..14104e2cd8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -0,0 +1,571 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + 
"documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "Update can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + 
"command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..4adf6fc07d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,504 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, 
+ "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { 
+ "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + 
"$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/gcpKMS.json b/test/client-side-encryption/spec/legacy/gcpKMS.json new file mode 100644 index 0000000000..c2c08b8a23 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/gcpKMS.json @@ -0,0 +1,226 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using GCP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "gcp": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json similarity index 95% rename from test/client-side-encryption/spec/getMore.json rename to test/client-side-encryption/spec/legacy/getMore.json index 637f69d509..ee99bf7537 100644 --- a/test/client-side-encryption/spec/getMore.json +++ b/test/client-side-encryption/spec/legacy/getMore.json @@ -179,18 +179,6 @@ "command_name": "find" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -216,7 +204,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/legacy/insert.json similarity index 93% rename from test/client-side-encryption/spec/insert.json rename to test/client-side-encryption/spec/legacy/insert.json index beb98c5eb0..cf2910fd7a 100644 --- a/test/client-side-encryption/spec/insert.json +++ b/test/client-side-encryption/spec/legacy/insert.json @@ -131,18 +131,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -168,7 +156,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -258,18 +246,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -295,7 +271,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/legacy/keyAltName.json similarity index 94% rename from test/client-side-encryption/spec/keyAltName.json rename to test/client-side-encryption/spec/legacy/keyAltName.json index 7088d0b0be..7f71b9dbeb 100644 --- 
a/test/client-side-encryption/spec/keyAltName.json +++ b/test/client-side-encryption/spec/legacy/keyAltName.json @@ -131,18 +131,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -163,7 +151,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/kmipKMS.json b/test/client-side-encryption/spec/legacy/kmipKMS.json new file mode 100644 index 0000000000..5749d21ab8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/kmipKMS.json @@ -0,0 +1,223 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + 
"base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/legacy/localKMS.json similarity index 93% rename from test/client-side-encryption/spec/localKMS.json rename to test/client-side-encryption/spec/legacy/localKMS.json index febc1ccfc8..67c4ba1308 100644 --- a/test/client-side-encryption/spec/localKMS.json +++ b/test/client-side-encryption/spec/legacy/localKMS.json @@ -114,18 +114,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -151,7 +139,7 @@ } ] }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/legacy/localSchema.json similarity index 95% rename from test/client-side-encryption/spec/localSchema.json rename to test/client-side-encryption/spec/legacy/localSchema.json index f939dbc123..4698520f6f 100644 --- a/test/client-side-encryption/spec/localSchema.json +++ b/test/client-side-encryption/spec/legacy/localSchema.json @@ -136,18 +136,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -173,7 +161,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/malformedCiphertext.json b/test/client-side-encryption/spec/legacy/malformedCiphertext.json similarity index 100% rename from test/client-side-encryption/spec/malformedCiphertext.json rename to test/client-side-encryption/spec/legacy/malformedCiphertext.json diff --git a/test/client-side-encryption/spec/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json similarity index 93% rename from test/client-side-encryption/spec/maxWireVersion.json rename to test/client-side-encryption/spec/legacy/maxWireVersion.json index 144786290d..f04f58dffd 100644 --- a/test/client-side-encryption/spec/maxWireVersion.json +++ b/test/client-side-encryption/spec/legacy/maxWireVersion.json @@ -1,7 +1,7 @@ { "runOn": [ { - "maxServerVersion": "4.0" + "maxServerVersion": "4.0.99" } ], "database_name": "default", @@ -50,6 +50,9 @@ "autoEncryptOpts": { "kmsProviders": { "aws": {} + }, + "extraOptions": { + "mongocryptdBypassSpawn": true } } }, diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/legacy/missingKey.json 
similarity index 92% rename from test/client-side-encryption/spec/missingKey.json rename to test/client-side-encryption/spec/legacy/missingKey.json index a7237f1792..275147bb72 100644 --- a/test/client-side-encryption/spec/missingKey.json +++ b/test/client-side-encryption/spec/legacy/missingKey.json @@ -102,7 +102,7 @@ "description": "Insert with encryption on a missing key", "clientOptions": { "autoEncryptOpts": { - "keyVaultNamespace": "admin.different", + "keyVaultNamespace": "keyvault.different", "kmsProviders": { "aws": {} } @@ -140,18 +140,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "different" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -177,7 +165,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/noSchema.json b/test/client-side-encryption/spec/legacy/noSchema.json new file mode 100644 index 0000000000..095434f886 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/noSchema.json @@ -0,0 +1,67 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "unencrypted", + "tests": [ + { + "description": "Insert on an unencrypted collection", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/legacy/replaceOne.json similarity index 95% rename from test/client-side-encryption/spec/replaceOne.json rename to test/client-side-encryption/spec/legacy/replaceOne.json index 1287fdea14..9757686819 100644 --- a/test/client-side-encryption/spec/replaceOne.json +++ b/test/client-side-encryption/spec/legacy/replaceOne.json @@ -148,18 +148,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -185,7 +173,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json new file mode 100644 index 0000000000..443aa0aa23 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 3 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 20 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/legacy/types.json similarity index 92% rename from test/client-side-encryption/spec/types.json rename to test/client-side-encryption/spec/legacy/types.json index 08928381e1..a6c6507e90 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/legacy/types.json @@ -103,18 +103,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - 
"filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -140,7 +128,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -254,18 +242,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -291,7 +267,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -405,18 +381,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -442,7 +406,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -540,7 +504,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: double" + "errorContains": "element of type: double" } } ] @@ -587,7 +551,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: decimal" + "errorContains": "element of type: decimal" } } ] @@ -656,18 +620,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -693,7 +645,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -807,18 +759,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -844,7 +784,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -943,7 +883,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: javascriptWithScope" + "errorContains": "element of type: javascriptWithScope" } } ] @@ -988,7 +928,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: object" + "errorContains": "element of type: object" } } ] @@ -1057,18 +997,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1094,7 +1022,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -1214,18 +1142,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1251,7 +1167,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -1369,18 +1285,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1406,7 +1310,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { 
"level": "majority" } @@ -1643,7 +1547,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: array" + "errorContains": "element of type: array" } } ] @@ -1688,7 +1592,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: bool" + "errorContains": "element of type: bool" } } ] diff --git a/test/client-side-encryption/spec/unsupportedCommand.json b/test/client-side-encryption/spec/legacy/unsupportedCommand.json similarity index 100% rename from test/client-side-encryption/spec/unsupportedCommand.json rename to test/client-side-encryption/spec/legacy/unsupportedCommand.json diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/legacy/updateMany.json similarity index 96% rename from test/client-side-encryption/spec/updateMany.json rename to test/client-side-encryption/spec/legacy/updateMany.json index 43c6dd717c..823909044b 100644 --- a/test/client-side-encryption/spec/updateMany.json +++ b/test/client-side-encryption/spec/legacy/updateMany.json @@ -164,18 +164,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -201,7 +189,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/legacy/updateOne.json similarity index 97% rename from test/client-side-encryption/spec/updateOne.json rename to test/client-side-encryption/spec/legacy/updateOne.json index d6a6de79e2..23bada964f 100644 --- a/test/client-side-encryption/spec/updateOne.json +++ b/test/client-side-encryption/spec/legacy/updateOne.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "admin" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -187,7 +175,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..e07137ce15 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json @@ -0,0 +1,642 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + 
"$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json new file mode 100644 index 
0000000000..f70bc572a8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -0,0 +1,609 @@ +{ + "description": "addKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "add keyAltName to non-existent data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "new_key_alt_name" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "new_key_alt_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with no keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add existing keyAltName to existing data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + 
"writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "another_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": "$keyAltNames" + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "another_name" + }, + { + "keyAltNames": "local_key" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "another_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json new file mode 100644 index 0000000000..2344a61a95 --- /dev/null +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -0,0 +1,119 @@ +{ + "description": "createDataKey-kms_providers-invalid", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + 
"keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "create data key without required master key fields", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": {} + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key field", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "masterKey": { + "invalid": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "invalid" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createDataKey.json b/test/client-side-encryption/spec/unified/createDataKey.json new file mode 100644 index 0000000000..110c726f9a --- /dev/null +++ b/test/client-side-encryption/spec/unified/createDataKey.json @@ -0,0 +1,711 @@ +{ + "description": "createDataKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": 
[ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with GCP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": 
[ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with no keyAltName", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$exists": false + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with single keyAltName", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "local_key" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with multiple keyAltNames", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc", + "def" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": 1 + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "abc" + }, + { + "keyAltNames": "def" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$type": "array" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "create datakey with custom key material", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + 
"arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with invalid custom key material (too short)", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json new file mode 100644 index 0000000000..3a10fb082f --- /dev/null +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -0,0 +1,557 @@ +{ + "description": "deleteKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": 
"ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "delete non-existent data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing AWS data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + 
"_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing local data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ] + } + ] + }, + { + "description": "delete existing data key twice", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", 
+ "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json new file mode 100644 index 0000000000..2ea3fe7358 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -0,0 +1,319 @@ +{ + "description": "getKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": 
{ + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json new file mode 100644 index 0000000000..2505abc16e --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -0,0 +1,289 @@ +{ + "description": "getKeyByAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + 
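
deleteKey and getKey reduce to ordinary delete and find commands with w: "majority" writes and "majority" reads, so their results are plain driver types. Continuing the same sketch, for an existing data key id:

result = ce.delete_key(key_id)  # pymongo.results.DeleteResult
assert result.deleted_count == 1
assert ce.delete_key(key_id).deleted_count == 0  # "delete existing data key twice"
assert ce.get_key(key_id) is None  # matches "$$unsetOrMatches": null
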
"client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "does_not_exist" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "does_not_exist" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "aws_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + 
"updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "aws_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "local_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json new file mode 100644 index 0000000000..d944712357 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -0,0 +1,260 @@ +{ + "description": "getKeys", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "getKeys with zero key documents", + "operations": [ + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with single key documents", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + 
"$$type": "binData" + }, + "keyAltNames": [ + "abc" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with many key documents", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + }, + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json new file mode 100644 index 0000000000..1b7077077a --- /dev/null +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -0,0 +1,672 @@ +{ + "description": "removeKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + 
"creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "remove keyAltName from non-existent data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove non-existent keyAltName from existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } + } + ], + 
"writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove an existing keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "remove the last keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + 
"$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "local_key" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "local_key" + ] + } + } + } + ] + } + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json new file mode 100644 index 0000000000..4c7d4e8048 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json @@ -0,0 +1,162 @@ +{ + "description": "rewrapManyDataKey-decrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + 
}, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap data key that fails during decryption due to invalid masterKey", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "local" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json new file mode 100644 index 0000000000..cd2d20c255 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json @@ -0,0 +1,250 @@ +{ + "description": "rewrapManyDataKey-encrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + 
"status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap with invalid masterKey for AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "invalid-vault-csfle.vault.azure.net", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "invalid-ring-csfle", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json new file mode 100644 index 0000000000..6b3c9664a9 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -0,0 +1,1493 @@ +{ + "description": "rewrapManyDataKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure_key" + ], + "keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": 
"ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "no keys to rewrap due to no filter matches", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": "no_matching_keys" + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "no_matching_keys" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": 
"aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + 
"bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "opts": { + "provider": "kmip" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": 
"binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new local KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] 
+ } + ] + }, + { + "description": "rewrap with current KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {} + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "masterKey": 1 + }, + "sort": { + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + 
"$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/cmap/connection-must-have-id.json b/test/cmap/connection-must-have-id.json index 487a5979d0..f2d6fb95e9 100644 --- a/test/cmap/connection-must-have-id.json +++ b/test/cmap/connection-must-have-id.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have an ID number associated with it", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -12,30 +15,37 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionPoolClosed", "ConnectionReady" ] diff --git a/test/cmap/connection-must-order-ids.json b/test/cmap/connection-must-order-ids.json index dda515c1a9..b7c2751dd7 100644 --- a/test/cmap/connection-must-order-ids.json +++ b/test/cmap/connection-must-order-ids.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have IDs assigned in order of creation", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -12,30 +15,37 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionPoolClosed", "ConnectionReady" ] diff --git a/test/cmap/pool-checkin-destroy-closed.json b/test/cmap/pool-checkin-destroy-closed.json index 3b6f1d2484..55d0c03752 100644 --- a/test/cmap/pool-checkin-destroy-closed.json +++ b/test/cmap/pool-checkin-destroy-closed.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must destroy checked in connection if pool has been closed", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -18,7 +21,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolClosed", @@ -26,16 +30,19 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin-destroy-stale.json b/test/cmap/pool-checkin-destroy-stale.json index 7faa44d33c..6ffb8f53d1 100644 --- a/test/cmap/pool-checkin-destroy-stale.json 
+++ b/test/cmap/pool-checkin-destroy-stale.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must destroy checked in connection if it is stale", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -18,7 +21,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -26,16 +30,19 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin-make-available.json b/test/cmap/pool-checkin-make-available.json index 838194fe8e..41c522ae67 100644 --- a/test/cmap/pool-checkin-make-available.json +++ b/test/cmap/pool-checkin-make-available.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must make valid checked in connection available", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -18,19 +21,23 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin.json b/test/cmap/pool-checkin.json index 5e93c207a9..3b40cec6f4 100644 --- a/test/cmap/pool-checkin.json +++ b/test/cmap/pool-checkin.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have a method of allowing the driver to check in a connection", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -15,11 +18,13 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionClosed", diff --git a/test/cmap/pool-checkout-connection.json b/test/cmap/pool-checkout-connection.json index e6e108ce58..d89b342605 100644 --- a/test/cmap/pool-checkout-connection.json +++ b/test/cmap/pool-checkout-connection.json @@ -3,22 +3,36 @@ "style": "unit", "description": "must be able to check out a connection", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" } ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionReady", + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady" + "ConnectionPoolReady", + "ConnectionPoolCreated" ] } diff --git a/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..6620f82fd9 --- /dev/null +++ b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "integration", + "description": "custom maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "maxConnecting": 1, + "maxPoolSize": 2, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/cmap/pool-checkout-error-closed.json b/test/cmap/pool-checkout-error-closed.json index 3823c23a78..ee2926e1c0 100644 --- a/test/cmap/pool-checkout-error-closed.json +++ b/test/cmap/pool-checkout-error-closed.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must throw error if checkOut is called on a closed pool", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn1" @@ -57,6 +60,7 @@ } ], "ignore": [ + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionClosed" diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..732478bf7e --- /dev/null +++ b/test/cmap/pool-checkout-maxConnecting-is-enforced.json @@ -0,0 +1,108 @@ +{ + "version": 1, + "style": "integration", + "description": "maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json 
b/test/cmap/pool-checkout-maxConnecting-timeout.json new file mode 100644 index 0000000000..84ddf8fdba --- /dev/null +++ b/test/cmap/pool-checkout-maxConnecting-timeout.json @@ -0,0 +1,103 @@ +{ + "version": 1, + "style": "integration", + "description": "waiting on maxConnecting is limited by WaitQueueTimeoutMS", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 50 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json b/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json new file mode 100644 index 0000000000..3b0d43e877 --- /dev/null +++ b/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json @@ -0,0 +1,88 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out minPoolSize connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "minPoolSize": 2, + "maxPoolSize": 3, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git 
a/test/cmap/pool-checkout-multiple.json b/test/cmap/pool-checkout-multiple.json index f3ecdb9be9..07a4eda629 100644 --- a/test/cmap/pool-checkout-multiple.json +++ b/test/cmap/pool-checkout-multiple.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must be able to check out multiple connections at the same time", "operations": [ + { + "name": "ready" + }, { "name": "start", "target": "thread1" @@ -43,19 +46,23 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ "ConnectionCreated", + "ConnectionPoolReady", "ConnectionReady", "ConnectionPoolCreated", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkout-no-idle.json b/test/cmap/pool-checkout-no-idle.json index 77ce40deac..0b0fe572ff 100644 --- a/test/cmap/pool-checkout-no-idle.json +++ b/test/cmap/pool-checkout-no-idle.json @@ -3,9 +3,13 @@ "style": "unit", "description": "must destroy and must not check out an idle connection if found while iterating available connections", "poolOptions": { - "maxIdleTimeMS": 10 + "maxIdleTimeMS": 10, + "backgroundThreadIntervalMS": -1 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -20,6 +24,11 @@ }, { "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 } ], "events": [ @@ -30,24 +39,29 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "idle" + "reason": "idle", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ "ConnectionReady", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionCheckOutStarted" ] diff --git a/test/cmap/pool-checkout-no-stale.json b/test/cmap/pool-checkout-no-stale.json index e5ebedfbe5..ec76f4e9c8 100644 --- a/test/cmap/pool-checkout-no-stale.json +++ b/test/cmap/pool-checkout-no-stale.json @@ -2,7 +2,13 @@ "version": 1, "style": "unit", "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "poolOptions": { + "backgroundThreadIntervalMS": -1 + }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -14,8 +20,16 @@ { "name": "clear" }, + { + "name": "ready" + }, { "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 } ], "events": [ @@ -26,11 +40,13 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -39,15 +55,18 @@ { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ "ConnectionReady", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionCheckOutStarted" ] diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/cmap/pool-checkout-returned-connection-maxConnecting.json new file mode 100644 index 0000000000..965d56f6d8 --- /dev/null 
+++ b/test/cmap/pool-checkout-returned-connection-maxConnecting.json @@ -0,0 +1,124 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out returned connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 4 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/cmap/pool-clear-clears-waitqueue.json b/test/cmap/pool-clear-clears-waitqueue.json new file mode 100644 index 0000000000..d4aef928c7 --- /dev/null +++ b/test/cmap/pool-clear-clears-waitqueue.json @@ -0,0 +1,101 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing pool clears the WaitQueue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 30000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "clear" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 3, + "timeout": 1000 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 
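
The integration-style maxConnecting test above relies on the server's failCommand fail point, using blockConnection/blockTimeMS to stall connection handshakes long enough for the throttling behavior to be observable. For readers unfamiliar with the mechanism, this is roughly how a test harness could arm and disarm it with PyMongo; the localhost URI is a placeholder, and the target server (4.4+) must be started with test commands enabled:

    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")  # placeholder address
    client.admin.command(
        "configureFailPoint", "failCommand",
        mode={"times": 50},
        data={
            "failCommands": ["isMaster", "hello"],
            "closeConnection": False,
            "blockConnection": True,  # hold each handshake open...
            "blockTimeMS": 750,       # ...for 750 ms, as in the fixture above
        },
    )
    try:
        ...  # run the checkout workload from the "operations" array here
    finally:
        # Always disarm the fail point afterwards.
        client.admin.command("configureFailPoint", "failCommand", mode="off")
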
+ } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionPoolCleared", + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckedIn", + "ConnectionClosed" + ] +} diff --git a/test/cmap/pool-clear-min-size.json b/test/cmap/pool-clear-min-size.json new file mode 100644 index 0000000000..239df871b8 --- /dev/null +++ b/test/cmap/pool-clear-min-size.json @@ -0,0 +1,68 @@ +{ + "version": 1, + "style": "unit", + "description": "pool clear halts background minPoolSize establishments", + "poolOptions": { + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 1 + }, + { + "name": "clear" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionClosed" + ] +} diff --git a/test/cmap/pool-clear-paused.json b/test/cmap/pool-clear-paused.json new file mode 100644 index 0000000000..847f08d849 --- /dev/null +++ b/test/cmap/pool-clear-paused.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing a paused pool emits no events", + "operations": [ + { + "name": "clear" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "clear" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-clear-ready.json b/test/cmap/pool-clear-ready.json new file mode 100644 index 0000000000..800c3545ad --- /dev/null +++ b/test/cmap/pool-clear-ready.json @@ -0,0 +1,69 @@ +{ + "version": 1, + "style": "unit", + "description": "after clear, cannot check out connections until pool ready", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "clear" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "reason": "connectionError" + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionCreated" + ] +} diff --git a/test/cmap/pool-close-destroy-conns.json b/test/cmap/pool-close-destroy-conns.json index 2bc50419b4..a3d58a2136 100644 --- a/test/cmap/pool-close-destroy-conns.json +++ b/test/cmap/pool-close-destroy-conns.json @@ -3,6 +3,9 @@ "style": "unit", "description": "When a pool is closed, it MUST first destroy all available connections in 
that pool", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -24,12 +27,14 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 2, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 }, { "type": "ConnectionPoolClosed", @@ -38,6 +43,7 @@ ], "ignore": [ "ConnectionCreated", + "ConnectionPoolReady", "ConnectionReady", "ConnectionPoolCreated", "ConnectionCheckOutStarted", diff --git a/test/cmap/pool-create-max-size.json b/test/cmap/pool-create-max-size.json index 2ba7bdf62b..e3a1fa8eda 100644 --- a/test/cmap/pool-create-max-size.json +++ b/test/cmap/pool-create-max-size.json @@ -6,6 +6,9 @@ "maxPoolSize": 3 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn1" @@ -53,62 +56,78 @@ "options": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ - "ConnectionReady" + "ConnectionReady", + "ConnectionPoolReady" ] } diff --git a/test/cmap/pool-create-min-size-error.json b/test/cmap/pool-create-min-size-error.json new file mode 100644 index 0000000000..1c744b850c --- /dev/null +++ b/test/cmap/pool-create-min-size-error.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "integration", + "description": "error during minPoolSize population clears pool", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "poolCreateMinSizeErrorTest" + } + }, + "poolOptions": { + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50, + "appName": "poolCreateMinSizeErrorTest" + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1 + }, + { + "name": "wait", + "ms": 200 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" + }, + { + "type": 
"ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-create-min-size.json b/test/cmap/pool-create-min-size.json index 470988043f..43118f7841 100644 --- a/test/cmap/pool-create-min-size.json +++ b/test/cmap/pool-create-min-size.json @@ -6,11 +6,23 @@ "minPoolSize": 3 }, "operations": [ + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, { "name": "waitForEvent", "event": "ConnectionCreated", "count": 3 }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + }, { "name": "checkOut" } @@ -21,21 +33,29 @@ "address": 42, "options": 42 }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-ready-ready.json b/test/cmap/pool-ready-ready.json new file mode 100644 index 0000000000..25dfa9c97c --- /dev/null +++ b/test/cmap/pool-ready-ready.json @@ -0,0 +1,39 @@ +{ + "version": 1, + "style": "unit", + "description": "readying a ready pool emits no events", + "operations": [ + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "ready" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-ready.json b/test/cmap/pool-ready.json new file mode 100644 index 0000000000..29ce7326cf --- /dev/null +++ b/test/cmap/pool-ready.json @@ -0,0 +1,57 @@ +{ + "version": 1, + "style": "unit", + "description": "pool starts as cleared and becomes ready", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady" + ] +} diff --git a/test/cmap/wait-queue-timeout.json b/test/cmap/wait-queue-timeout.json index 90ec2f62d9..fbcbdfb04d 100644 --- a/test/cmap/wait-queue-timeout.json +++ b/test/cmap/wait-queue-timeout.json @@ -4,9 +4,12 @@ "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 20 + "waitQueueTimeoutMS": 50 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn0" @@ -39,28 +42,34 @@ }, "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": 
"ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckOutFailed", - "reason": "timeout" + "reason": "timeout", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ "ConnectionCreated", "ConnectionReady", "ConnectionClosed", - "ConnectionPoolCreated" + "ConnectionPoolCreated", + "ConnectionPoolReady" ] } diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json new file mode 100644 index 0000000000..9db5ff06d7 --- /dev/null +++ b/test/collection_management/clustered-indexes.json @@ -0,0 +1,291 @@ +{ + "description": "clustered-indexes", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "5.3", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ci-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ci-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ci-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + }, + { + "description": "listCollections includes clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": { + "name": { + "$eq": "test" + } + } + }, + "expectResult": [ + { + "name": "test", + "options": { + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index", + "v": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": { + "$eq": "test" + } + } + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + }, + { + "description": "listIndexes 
returns the index", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listIndexes", + "object": "collection0", + "expectResult": [ + { + "key": { + "_id": 1 + }, + "name": "test index", + "clustered": true, + "unique": true, + "v": { + "$$type": [ + "int", + "long" + ] + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "test" + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/createCollection-pre_and_post_images.json b/test/collection_management/createCollection-pre_and_post_images.json new file mode 100644 index 0000000000..f488deacd8 --- /dev/null +++ b/test/collection_management/createCollection-pre_and_post_images.json @@ -0,0 +1,92 @@ +{ + "description": "createCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "createCollection with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + }, + "databaseName": "papi-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/modifyCollection-pre_and_post_images.json b/test/collection_management/modifyCollection-pre_and_post_images.json new file mode 100644 index 0000000000..8026faeb17 --- /dev/null +++ b/test/collection_management/modifyCollection-pre_and_post_images.json @@ -0,0 +1,111 @@ +{ + "description": "modifyCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "modifyCollection to changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + }, + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json new file mode 100644 index 0000000000..8525056fd1 --- /dev/null +++ b/test/collection_management/timeseries-collection.json @@ -0,0 +1,320 @@ +{ + "description": "timeseries-collection", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ts-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ts-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with all options", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + }, + { + "description": "insertMany with duplicate ids", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", 
+ "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "time": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": {}, + "sort": { + "time": 1 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + }, + { + "description": "createCollection with bucketing options", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/bulkWrite.json index c5cd5a2399..49c728442e 100644 --- a/test/command_monitoring/bulkWrite.json +++ b/test/command_monitoring/bulkWrite.json @@ -1,110 +1,152 @@ { - "data": [ + "description": "bulkWrite", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ + "initialData": [ { - "description": "A successful mixed bulk write", - "operation": { - "name": "bulkWrite", - "arguments": { - 
"requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 333 - } - } - } - } - ] - } - }, - "expectations": [ + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4, - "x": 44 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } + "_id": 1, + "x": 11 }, { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } + "_id": 2, + "x": 22 }, { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful mixed bulk write", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "filter": { "_id": 3 }, - "u": { + "update": { "$set": { "x": 333 } - }, - "upsert": false, - "multi": false + } } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "command-monitoring-tests" + } + ] } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "$set": { + "x": 333 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] } ] } diff --git a/test/command_monitoring/command.json b/test/command_monitoring/command.json index 7e1e347be0..c28af95fed 100644 --- a/test/command_monitoring/command.json +++ b/test/command_monitoring/command.json @@ -1,111 +1,81 @@ { - "data": [ + "description": "command", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ + "initialData": [ { - "description": "A successful command", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, + "collectionName": "test", + "databaseName": "command-monitoring-tests", + 
"documents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } + "_id": 1, + "x": 11 } ] - }, + } + ], + "tests": [ { - "description": "A failed command event", - "operation": { - "name": "count", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ + "description": "A successful command", + "operations": [ { - "command_started_event": { + "name": "runCommand", + "object": "database", + "arguments": { "command": { - "count": "test", - "query": { - "$or": true - } + "ping": 1 }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "count" - } - } - ] - }, - { - "description": "A successful command with a non-primary read preference", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 + "commandName": "ping" } - }, - "read_preference": { - "mode": "primaryPreferred" } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "command-monitoring-tests" } }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1 + }, + "commandName": "ping" + } + } + ] } ] } diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/deleteMany.json index 7cd396806c..78ebad1f98 100644 --- a/test/command_monitoring/deleteMany.json +++ b/test/command_monitoring/deleteMany.json @@ -1,113 +1,160 @@ { - "data": [ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", "tests": [ { - "description": "A successful delete many", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 + "description": "A successful deleteMany", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } } } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 + ], + "ordered": true + }, + "commandName": "delete", + 
"databaseName": "command-monitoring-tests" + } }, - "command_name": "delete" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "delete" + } + } + ] } ] }, { - "description": "A successful delete many command with write errors", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 + "description": "A successful deleteMany with write errors", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } } + }, + "expectError": { + "isClientError": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 0 } - }, - "limit": 0 - } - ], - "ordered": true + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] } ] } diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/deleteOne.json index 0971dfcf2c..2420794fe5 100644 --- a/test/command_monitoring/deleteOne.json +++ b/test/command_monitoring/deleteOne.json @@ -1,113 +1,160 @@ { - "data": [ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", "tests": [ { - "description": "A successful delete one", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 + "description": "A successful deleteOne", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } } } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 1 } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": 
"command-monitoring-tests" + } }, - "command_name": "delete" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "delete" + } + } + ] } ] }, { - "description": "A successful delete one command with write errors", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 + "description": "A successful deleteOne with write errors", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } } + }, + "expectError": { + "isClientError": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 1 } - }, - "limit": 1 - } - ], - "ordered": true + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] } ] } diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json index 039c5fead1..bc9668499b 100644 --- a/test/command_monitoring/find.json +++ b/test/command_monitoring/find.json @@ -1,558 +1,556 @@ { - "data": [ + "description": "find", + "schemaVersion": "1.15", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "initialData": [ { - "_id": 5, - "x": 55 + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "namespace": "command-monitoring-tests.test", "tests": [ { - "description": "A successful find event with no options", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": 1 + "description": "A successful find with no options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": 1 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - 
"command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + } + ] } - ] + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } - }, - "command_name": "find" - } + } + ] } ] }, { - "description": "A successful find event with options", - "operation": { - "name": "find", - "read_preference": { - "mode": "primaryPreferred" - }, - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "modifiers": { - "$comment": "test", - "$hint": { + "description": "A successful find with options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { "_id": 1 }, - "$max": { + "max": { "_id": 6 }, - "$maxTimeMS": 6000, - "$min": { + "maxTimeMS": 6000, + "min": { "_id": 0 - }, - "$returnKey": false, - "$showDiskLoc": false + } } } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 4, - "x": 44 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gt": 1 + } }, - { - "_id": 5, - "x": 55 + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 } - ] + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "find" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "x": 33 + }, + { + "x": 22 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] } ] }, { - "description": "A successful find event with a getmore", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - } - }, - "expectations": [ + "description": "A successful find with showRecordId and returnKey", + "operations": [ { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 }, - "command_name": "find", - "database_name": "command-monitoring-tests" + "showRecordId": true, + "returnKey": true } - }, + } + ], + "expectEvents": [ { - 
"command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "showRecordId": true, + "returnKey": true }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "3" + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] } - ] + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } - }, - "command_name": "getMore" - } + } + ] } ] }, { - "description": "A successful find event with a getmore and killcursors", - "ignore_if_server_version_greater_than": "3.0", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ + "description": "A successful find with a getMore", + "operations": [ { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 } }, - "command_name": "find", - "database_name": "command-monitoring-tests" + "sort": { + "_id": 1 + }, + "batchSize": 3 } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } }, - { - "_id": 2, - "x": 22 + "sort": { + "_id": 1 }, - { - "_id": 3, - "x": 33 - } - ] + "batchSize": 3 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" + { + "commandStartedEvent": { + 
"command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3 }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] + "commandName": "getMore", + "databaseName": "command-monitoring-tests" } }, - "command_name": "getMore" - } - }, - { - "command_started_event": { - "command": { - "killCursors": "test", - "cursors": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursorsUnknown": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] } ] }, { - "description": "A successful find event with a getmore and the server kills the cursor", - "ignore_if_server_version_less_than": "3.1", - "ignore_if_topology_type": [ - "sharded" - ], - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] } - }, - "expectations": [ + ], + "operations": [ { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 } }, - "command_name": "find", - "database_name": "command-monitoring-tests" + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } }, - { - "_id": 2, - "x": 22 + "sort": { + "_id": 1 }, - { - "_id": 3, - "x": 33 - } - ] + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": 
"test", + "batchSize": 1 }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] + "commandName": "getMore", + "databaseName": "command-monitoring-tests" } }, - "command_name": "getMore" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] } ] }, { "description": "A failed find event", - "operation": { - "name": "find", - "arguments": { - "filter": { - "$or": true + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "$or": true + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" } }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "find" - } + { + "commandFailedEvent": { + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] } ] } diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/insertMany.json index 327da61b74..a80a218c67 100644 --- a/test/command_monitoring/insertMany.json +++ b/test/command_monitoring/insertMany.json @@ -1,149 +1,146 @@ { - "data": [ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ + "initialData": [ { - "description": "A successful insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ] - } - }, - "expectations": [ + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - "ordered": true - } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } + "_id": 1, + "x": 11 } ] - }, + } + ], + "tests": [ { - "description": "A successful insert many command with write errors", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "expectations": [ + "description": "A successful insertMany", + "operations": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "options": { - "ordered": true + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" + ] } - }, + } + ], + 
"expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "insert" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] } ] }, { - "description": "A successful unordered insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ + "description": "A successful insertMany with write errors", + "operations": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - "ordered": false + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" + ] + }, + "expectError": { + "isClientError": false } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "insert" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] } ] } diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/insertOne.json index 877bca1a61..6ff732e41b 100644 --- a/test/command_monitoring/insertOne.json +++ b/test/command_monitoring/insertOne.json @@ -1,95 +1,142 @@ { - "data": [ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ + "initialData": [ { - "description": "A successful insert one", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 } - }, - "expectations": [ + ] + } + ], + "tests": [ + { + "description": "A successful insertOne", + "operations": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + 
"documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "insert" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] } ] }, { - "description": "A successful insert one command with write errors", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "expectations": [ + "description": "A successful insertOne with write errors", + "operations": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isClientError": false } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "insert" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] } ] } diff --git a/test/command_monitoring/redacted-commands.json b/test/command_monitoring/redacted-commands.json new file mode 100644 index 0000000000..4302ba8900 --- /dev/null +++ b/test/command_monitoring/redacted-commands.json @@ -0,0 +1,679 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + } + ], + "tests": [ + { + "description": "authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "authenticate", + "command": { + "authenticate": { + "$$exists": false + }, + "mechanism": { + "$$exists": false + }, + "user": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslStart", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslStart", + "command": { + "saslStart": { + "$$exists": false + }, + "payload": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } 
+ ] + } + ] + }, + { + "description": "saslContinue", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslContinue", + "command": { + "saslContinue": { + "$$exists": false + }, + "conversationId": { + "$$exists": false + }, + "payload": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createUser", + "command": { + "createUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "updateUser", + "command": { + "updateUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydb", + "runOnRequirements": [ + { 
+ "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydb", + "command": { + "copydb": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate is not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate is not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json index ae116289eb..4c16d6df11 100644 --- a/test/command_monitoring/unacknowledgedBulkWrite.json +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -1,67 +1,106 @@ { - "data": [ + "description": "unacknowledgedBulkWrite", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test-unacknowledged-bulk-write", - "database_name": "command-monitoring-tests", - "tests": [ + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, { - "description": "A successful unordered bulk write with an unacknowledged write concern", - "comment": "On a 2.4 server, no GLE is sent and requires a client-side manufactured reply", - "operation": { - "name": "bulkWrite", + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test", "collectionOptions": { "writeConcern": { "w": 0 } - }, - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful unordered bulk write with an unacknowledged write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } } } - } - ], - "options": { + ], "ordered": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test-unacknowledged-bulk-write", - "documents": [ - { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - ], - "ordered": false, - "writeConcern": { - "w": 0 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ], + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" } }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1 - }, - "command_name": "insert" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": { + "$$exists": false + } + }, + "commandName": "insert" + } + } + ] } ] } diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/updateMany.json index 8e98fc92fd..b15434226c 100644 --- a/test/command_monitoring/updateMany.json +++ b/test/command_monitoring/updateMany.json @@ -1,135 +1,186 @@ { - "data": [ + "description": 
"updateMany", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", "tests": [ { - "description": "A successful update many", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 + "description": "A successful updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } } } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "upsert": false - } - ] + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "update" + } + } + ] } ] }, { - "description": "A successful update many command with write errors", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 + "description": "A successful updateMany with write errors", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } } }, - "update": { - "$nothing": { - "x": 1 - } + "expectError": { + "isClientError": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true } - }, - "u": { - "$nothing": { - "x": 1 - } - }, - "multi": true, - "upsert": false - } - ] + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - 
"command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] } ] } diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/updateOne.json index 565b749704..a0ae99e88d 100644 --- a/test/command_monitoring/updateOne.json +++ b/test/command_monitoring/updateOne.json @@ -1,193 +1,258 @@ { - "data": [ + "description": "updateOne", + "schemaVersion": "1.0", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], - "collection_name": "test", - "database_name": "command-monitoring-tests", "tests": [ { - "description": "A successful update one", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 + "description": "A successful updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } } } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": false, - "upsert": false - } - ] + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] } ] }, { - "description": "A successful update one with upsert when the upserted id is not an object id", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "expectations": [ + "description": "A successful updateOne with upsert where the upserted id is not an ObjectId", + "operations": [ { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": false, - "upsert": true - } - ] + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } }, - "command_name": "update", - "database_name": "command-monitoring-tests" + "upsert": true } - }, + } + ], + "expectEvents": [ { - "command_succeeded_event": { - "reply": { - "ok": 1, - 
"n": 1, - "upserted": [ - { - "index": 0, - "_id": 4 - } - ] + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "upserted": [ + { + "index": 0, + "_id": 4 + } + ] + }, + "commandName": "update" + } + } + ] } ] }, { - "description": "A successful update one command with write errors", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 + "description": "A successful updateOne with write errors", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } } }, - "update": { - "$nothing": { - "x": 1 - } + "expectError": { + "isClientError": false } } - }, - "expectations": [ + ], + "expectEvents": [ { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } } - }, - "u": { - "$nothing": { - "x": 1 - } - }, - "multi": false, - "upsert": false - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } }, - "command_name": "update" - } + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] } ] } diff --git a/test/command_monitoring/writeConcernError.json b/test/command_monitoring/writeConcernError.json new file mode 100644 index 0000000000..7bc16f2ab7 --- /dev/null +++ b/test/command_monitoring/writeConcernError.json @@ -0,0 +1,155 @@ +{ + "description": "writeConcernError", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A retryable write with write concern errors publishes success event", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": 
[ + "insert" + ], + "writeConcernError": { + "code": 91, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "writeConcernError": { + "code": 91, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..b65c641866 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from test import setup, teardown + +import pytest + + +@pytest.fixture(scope="session", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() diff --git a/test/connection_string/test/invalid-uris.json b/test/connection_string/test/invalid-uris.json index 677cb5384c..a7accbd27d 100644 --- a/test/connection_string/test/invalid-uris.json +++ b/test/connection_string/test/invalid-uris.json @@ -162,15 +162,6 @@ "auth": null, "options": null }, - { - "description": "Missing delimiting slash between hosts and options", - "uri": "mongodb://example.com?w=1", - "valid": false, - "warning": null, - "hosts": null, - "auth": null, - "options": null - }, { "description": "Incomplete key value pair for option", "uri": "mongodb://example.com/?w", @@ -189,15 +180,6 @@ "auth": null, "options": null }, - { - "description": "Username with password containing an unescaped colon", - "uri": "mongodb://alice:foo:bar@127.0.0.1", - "valid": false, - "warning": null, - "hosts": null, - "auth": null, - "options": null - }, { "description": "Username containing an unescaped at-sign", "uri": "mongodb://alice@@127.0.0.1", @@ -251,6 +233,51 @@ "hosts": null, "auth": null, "options": null + }, + { + "description": "mongodb+srv with multiple service names", + "uri": "mongodb+srv://test5.test.mongodb.com,test6.test.mongodb.com", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "mongodb+srv with port number", + "uri": "mongodb+srv://test7.test.mongodb.com:27018", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign", + "uri": "mongodb://alice%foo:bar@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign and an escaped one", + "uri": "mongodb://user%20%:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign (non hex digit)", + "uri": "mongodb://user%w:password@localhost", + "valid": false, + "warning": 
null, + "hosts": null, + "auth": null, + "options": null } ] } diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json index 672777ff84..4f684ff185 100644 --- a/test/connection_string/test/valid-auth.json +++ b/test/connection_string/test/valid-auth.json @@ -240,6 +240,27 @@ "authmechanism": "MONGODB-CR" } }, + { + "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "!$&'()*+,;=", + "password": "!$&'()*+,;=", + "db": "admin" + }, + "options": { + "authmechanism": "MONGODB-CR" + } + }, { "description": "Escaped username (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", diff --git a/test/connection_string/test/valid-host_identifiers.json b/test/connection_string/test/valid-host_identifiers.json index f33358725c..e8833b4af2 100644 --- a/test/connection_string/test/valid-host_identifiers.json +++ b/test/connection_string/test/valid-host_identifiers.json @@ -132,18 +132,18 @@ }, { "description": "UTF-8 hosts", - "uri": "mongodb://b\u00fccher.example.com,uml\u00e4ut.example.com/", + "uri": "mongodb://bücher.example.com,umläut.example.com/", "valid": true, "warning": false, "hosts": [ { "type": "hostname", - "host": "b\u00fccher.example.com", + "host": "bücher.example.com", "port": null }, { "type": "hostname", - "host": "uml\u00e4ut.example.com", + "host": "umläut.example.com", "port": null } ], diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index 4c2bded9e7..01bc2264bb 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -20,6 +20,23 @@ "options": { "authmechanism": "MONGODB-CR" } + }, + { + "description": "Missing delimiting slash between hosts and options", + "uri": "mongodb://example.com?tls=true", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "tls": true + } } ] } diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index 87f7248f21..1eacbf8fcb 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -63,6 +63,36 @@ "options": { "wtimeoutms": 10 } + }, + { + "description": "Empty integer option values are ignored", + "uri": "mongodb://localhost/?maxIdleTimeMS=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Empty boolean option value are ignored", + "uri": "mongodb://localhost/?journal=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null } ] } diff --git a/test/crud/unified/aggregate-allowdiskuse.json b/test/crud/unified/aggregate-allowdiskuse.json new file mode 100644 index 0000000000..2e54175b8a --- /dev/null +++ b/test/crud/unified/aggregate-allowdiskuse.json @@ -0,0 +1,155 @@ +{ + "description": "aggregate-allowdiskuse", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + 
"id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": { + "$$exists": false + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json new file mode 100644 index 0000000000..039900920f --- /dev/null +++ b/test/crud/unified/aggregate-let.json @@ -0,0 +1,376 @@ +{ + "description": "aggregate-let", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + }, + "expectResult": [ + { + "x": "foo", + "y": 
"$bar", + "rand": { + "$$type": "double" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-merge-errorResponse.json b/test/crud/unified/aggregate-merge-errorResponse.json new file mode 100644 index 0000000000..6c7305fd91 --- /dev/null +++ b/test/crud/unified/aggregate-merge-errorResponse.json @@ -0,0 +1,90 @@ +{ + "description": "aggregate-merge-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": 
"client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 1 + } + ] + } + ], + "tests": [ + { + "description": "aggregate $merge DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database0", + "arguments": { + "pipeline": [ + { + "$documents": [ + { + "_id": 2, + "x": 1 + } + ] + }, + { + "$merge": { + "into": "test", + "whenMatched": "fail" + } + } + ] + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "_id": 1 + }, + "keyValue": { + "_id": 2 + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-merge.json b/test/crud/unified/aggregate-merge.json new file mode 100644 index 0000000000..ac61ceb8a6 --- /dev/null +++ b/test/crud/unified/aggregate-merge.json @@ -0,0 +1,497 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_merge" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_merge", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and batch size of 0", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 
1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "batchSize": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "cursor": {} + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and majority readConcern", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and local readConcern", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "local" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and available readConcern", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json new file mode 100644 index 0000000000..e293457c1c --- /dev/null +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -0,0 +1,407 @@ +{ + "description": "aggregate-out-readConcern", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_linearizable", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_out_readconcern", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "readConcern majority with out stage", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern local with out stage", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "local" + } + } + 
} + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern available with out stage", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern linearizable with out stage", + "operations": [ + { + "object": "collection_readConcern_linearizable", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "linearizable" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json new file mode 100644 index 0000000000..bc887e83cb --- /dev/null +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -0,0 +1,460 @@ +{ + "description": "aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + 
"operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + 
"$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json new file mode 100644 index 0000000000..0cbfb4e6e9 --- /dev/null +++ b/test/crud/unified/aggregate.json @@ -0,0 +1,567 @@ +{ + "description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "aggregate-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "aggregate-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "aggregate with multiple batches works", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with a string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment sets comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": "comment" + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + 
}, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-arrayFilters-clientError.json b/test/crud/unified/bulkWrite-arrayFilters-clientError.json new file mode 100644 index 0000000000..63815e3233 --- /dev/null +++ b/test/crud/unified/bulkWrite-arrayFilters-clientError.json @@ -0,0 +1,151 @@ +{ + "description": "bulkWrite-arrayFilters-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.5.5" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite on server that doesn't support arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "BulkWrite on server that doesn't support arrayFilters with arrayFilters on second op", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + } + } + }, + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-arrayFilters.json b/test/crud/unified/bulkWrite-arrayFilters.json new file mode 100644 index 0000000000..70ee014f7a --- /dev/null +++ b/test/crud/unified/bulkWrite-arrayFilters.json @@ -0,0 +1,279 @@ +{ + "description": "bulkWrite-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with arrayFilters", + "operations": [ + { + "object": "collection0", + 
"name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json new file mode 100644 index 0000000000..0b2addc850 --- /dev/null +++ b/test/crud/unified/bulkWrite-comment.json @@ -0,0 +1,519 @@ +{ + "description": "bulkWrite-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_comment" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, 
+ { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + 
"ordered": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint-clientError.json b/test/crud/unified/bulkWrite-delete-hint-clientError.json new file mode 100644 index 0000000000..2961b55dc0 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint-clientError.json @@ -0,0 +1,193 @@ +{ + "description": "bulkWrite-delete-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + 
"requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint-serverError.json b/test/crud/unified/bulkWrite-delete-hint-serverError.json new file mode 100644 index 0000000000..fa99522093 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint-serverError.json @@ -0,0 +1,252 @@ +{ + "description": "bulkWrite-delete-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], 
+ "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint.json b/test/crud/unified/bulkWrite-delete-hint.json new file mode 100644 index 0000000000..9fcdecefd7 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint.json @@ -0,0 +1,247 @@ +{ + "description": "bulkWrite-delete-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 3, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json 
b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..2dda9486e8 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "bulkWrite-deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + 
] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json new file mode 100644 index 0000000000..45c20ea49a --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 0 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..aadf6d9e99 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json @@ -0,0 +1,265 @@ +{ + "description": "bulkWrite-deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + 
], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json new file mode 100644 index 0000000000..f3268163cb --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with let option", + 
"runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-errorResponse.json b/test/crud/unified/bulkWrite-errorResponse.json new file mode 100644 index 0000000000..157637c713 --- /dev/null +++ b/test/crud/unified/bulkWrite-errorResponse.json @@ -0,0 +1,88 @@ +{ + "description": "bulkWrite-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "bulkWrite operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..92bbb1aaf2 --- /dev/null +++ b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json @@ -0,0 +1,374 @@ +{ + "description": "bulkWrite-insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + 
"client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + 
"documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..fce647d8f4 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json @@ -0,0 +1,532 @@ +{ + "description": "bulkWrite-replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + 
{ + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 
+ } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..e54cd704df --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json @@ -0,0 +1,293 @@ +{ + "description": "bulkWrite-replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + 
} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json new file mode 100644 index 0000000000..70f63837a8 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -0,0 +1,226 @@ +{ + "description": "BulkWrite replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + 
] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint-clientError.json b/test/crud/unified/bulkWrite-update-hint-clientError.json new file mode 100644 index 0000000000..d5eb71c29e --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint-clientError.json @@ -0,0 +1,284 @@ +{ + "description": "bulkWrite-update-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": 
"BulkWrite replaceOne with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint-serverError.json b/test/crud/unified/bulkWrite-update-hint-serverError.json new file mode 100644 index 0000000000..b0f7e1b381 --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint-serverError.json @@ -0,0 +1,422 @@ +{ + "description": "bulkWrite-update-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + 
"$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint.json b/test/crud/unified/bulkWrite-update-hint.json new file mode 100644 index 0000000000..4206359891 --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint.json @@ -0,0 +1,445 @@ +{ + "description": "bulkWrite-update-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints", + "operations": [ + { + "object": "collection0", + "name": 
"bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 4, + "modifiedCount": 4, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 444 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-validation.json b/test/crud/unified/bulkWrite-update-validation.json new file mode 100644 index 0000000000..f9bfda0edd --- /dev/null +++ b/test/crud/unified/bulkWrite-update-validation.json @@ -0,0 +1,210 @@ +{ + "description": "bulkWrite-update-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..35a5cdd52a --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json @@ -0,0 +1,452 @@ +{ + "description": "bulkWrite-updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": 
"bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..87478918d2 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", 
+ "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json new file mode 100644 index 0000000000..fbeba1a607 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "BulkWrite updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + 
"_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 21 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..cbbe113ce8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json @@ -0,0 +1,460 @@ +{ + "description": "bulkWrite-updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] 
+ }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + 
"name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1345f6b536 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 
1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json new file mode 100644 index 0000000000..96783c782f --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -0,0 +1,247 @@ +{ + "description": "BulkWrite updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + 
"minServerVersion": "4.2.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/countDocuments-comment.json b/test/crud/unified/countDocuments-comment.json new file mode 100644 index 0000000000..e6c7ae8170 --- /dev/null +++ b/test/crud/unified/countDocuments-comment.json @@ -0,0 +1,208 @@ +{ + "description": "countDocuments-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "countDocuments-comments-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "countDocuments-comments-test", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with document comment on less than 4.4.0 - server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": 
"countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json new file mode 100644 index 0000000000..2a81282de8 --- /dev/null +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -0,0 +1,446 @@ +{ + "description": "db-aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0", + "databaseOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Database-level aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + 
"$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/db-aggregate.json b/test/crud/unified/db-aggregate.json similarity index 68% rename from test/crud/v2/db-aggregate.json rename to test/crud/unified/db-aggregate.json index d88b9e1819..5015405bfc 100644 --- a/test/crud/v2/db-aggregate.json +++ b/test/crud/unified/db-aggregate.json @@ -1,17 +1,43 @@ { - "runOn": [ + "description": "db-aggregate", + "schemaVersion": "1.4", + "runOnRequirements": [ { - "minServerVersion": "3.6.0" + "minServerVersion": "3.6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } } ], - "database_name": "admin", 
"tests": [ { "description": "Aggregate with $listLocalSessions", "operations": [ { + "object": "database0", "name": "aggregate", - "object": "database", "arguments": { "pipeline": [ { @@ -33,7 +59,7 @@ } ] }, - "result": [ + "expectResult": [ { "dummy": "dummy field" } @@ -45,8 +71,8 @@ "description": "Aggregate with $listLocalSessions and allowDiskUse", "operations": [ { + "object": "database0", "name": "aggregate", - "object": "database", "arguments": { "pipeline": [ { @@ -69,7 +95,7 @@ ], "allowDiskUse": true }, - "result": [ + "expectResult": [ { "dummy": "dummy field" } diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json new file mode 100644 index 0000000000..6abc5fd58a --- /dev/null +++ b/test/crud/unified/deleteMany-comment.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], 
+ "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-clientError.json b/test/crud/unified/deleteMany-hint-clientError.json new file mode 100644 index 0000000000..66320122b5 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-clientError.json @@ -0,0 +1,149 @@ +{ + "description": "deleteMany-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-serverError.json b/test/crud/unified/deleteMany-hint-serverError.json new file mode 100644 index 0000000000..88d4a65576 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-serverError.json @@ -0,0 +1,190 @@ +{ + "description": "deleteMany-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + 
"$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-unacknowledged.json b/test/crud/unified/deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..ab7e9c7c09 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-unacknowledged.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": 
"_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint.json b/test/crud/unified/deleteMany-hint.json new file mode 100644 index 0000000000..59d903d201 --- /dev/null +++ b/test/crud/unified/deleteMany-hint.json @@ -0,0 +1,173 @@ +{ + "description": "deleteMany-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/deleteMany-let.json b/test/crud/unified/deleteMany-let.json new file mode 100644 index 0000000000..71bf26a013 --- /dev/null +++ b/test/crud/unified/deleteMany-let.json @@ -0,0 +1,201 @@ +{ + "description": "deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json new file mode 100644 index 0000000000..0f42b086a3 --- /dev/null +++ b/test/crud/unified/deleteOne-comment.json @@ -0,0 +1,243 @@ +{ + "description": "deleteOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with string comment", + "runOnRequirements": [ 
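deleteMany-let.json above pairs a $expr filter with a top-level let document of variables that the server substitutes for $$name; servers before 5.0 reject delete.let as an unknown field, exactly as the second test asserts. Roughly, in PyMongo 4.1+ terms (an illustration, not the test runner's code):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
coll = client["crud-tests"]["coll0"]

# "let" binds server-side variables referenced as $$name inside $expr.
result = coll.delete_many(
    {"$expr": {"$eq": ["$name", "$$name"]}},
    let={"name": "name"},
)
print(result.deleted_count)  # 2 against the fixture data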
+ { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-errorResponse.json b/test/crud/unified/deleteOne-errorResponse.json new file mode 100644 index 0000000000..1f3a266f1e --- /dev/null +++ b/test/crud/unified/deleteOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "deleteOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "delete operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 
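The deleteOne-comment tests above attach a comment to the delete command: 4.4+ servers accept any BSON value, while the pre-4.4 case expects a server-side error because older servers do not recognize the field on writes. A hedged sketch (the comment parameter assumes PyMongo 4.1+):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
coll = client["crud-tests"]["coll0"]

# Both forms are forwarded verbatim in the delete command.
coll.delete_one({"_id": 1}, comment="comment")
coll.delete_one({"_id": 2}, comment={"key": "value"})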
1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-clientError.json b/test/crud/unified/deleteOne-hint-clientError.json new file mode 100644 index 0000000000..cf629f59e0 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "deleteOne-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-serverError.json b/test/crud/unified/deleteOne-hint-serverError.json new file mode 100644 index 0000000000..15541ed857 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-serverError.json @@ -0,0 +1,170 @@ +{ + "description": "deleteOne-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + 
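deleteOne-errorResponse.json above arms the failCommand fail point so that the very next delete fails with code 8, then asserts both errorCode and the raw errorResponse document. Something similar can be driven by hand, assuming a test deployment started with enableTestCommands=1 (my assumption; the fixture leaves deployment details to the runner):

from pymongo import MongoClient
from pymongo.errors import OperationFailure

client = MongoClient("mongodb://localhost:27017")
coll = client["crud-tests"]["test"]

# Fail the next delete command with error code 8 (UnknownError).
client.admin.command({
    "configureFailPoint": "failCommand",
    "mode": {"times": 1},
    "data": {"failCommands": ["delete"], "errorCode": 8},
})

try:
    coll.delete_one({"_id": 1})
except OperationFailure as exc:
    # exc.details is the raw server reply that the errorResponse
    # assertion in the fixture matches against.
    assert exc.code == 8 and exc.details["code"] == 8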
"limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-unacknowledged.json b/test/crud/unified/deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1782f0f525 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-unacknowledged.json @@ -0,0 +1,241 @@ +{ + "description": "deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + 
] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint.json b/test/crud/unified/deleteOne-hint.json new file mode 100644 index 0000000000..bcc4bc2347 --- /dev/null +++ b/test/crud/unified/deleteOne-hint.json @@ -0,0 +1,161 @@ +{ + "description": "deleteOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "deleteOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-let.json b/test/crud/unified/deleteOne-let.json new file mode 100644 index 0000000000..9718682235 --- /dev/null +++ b/test/crud/unified/deleteOne-let.json @@ -0,0 +1,191 @@ +{ + "description": "deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 
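Reading the expectEvents blocks of the deleteOne and deleteMany files side by side, the only wire-level difference is the per-statement limit field: 1 for deleteOne, 0 for deleteMany. A speculative sketch of the raw command shape those assertions describe, issued directly (again assuming a 4.4+ server so that hint is accepted):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["crud-v2"]

# The command document the commandStartedEvent assertions expect;
# deleteOne maps to limit: 1, deleteMany would use limit: 0.
reply = db.command({
    "delete": "DeleteOne_hint",
    "deletes": [{"q": {"_id": 1}, "hint": "_id_", "limit": 1}],
})
print(reply["n"])  # number of documents deleted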
"coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json new file mode 100644 index 0000000000..11bce9ac9d --- /dev/null +++ b/test/crud/unified/distinct-comment.json @@ -0,0 +1,186 @@ +{ + "description": "distinct-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "distinct", 
+ "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": "comment" + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": "comment" + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with document comment - pre 4.4, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount-comment.json b/test/crud/unified/estimatedDocumentCount-comment.json new file mode 100644 index 0000000000..6c0adacc8f --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount-comment.json @@ -0,0 +1,170 @@ +{ + "description": "estimatedDocumentCount-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": "comment" + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with document comment - pre 4.4.14, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + 
"expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..1b650c1cb6 --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount.json @@ -0,0 +1,357 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection0View", + "database": "database0", + "collectionName": "coll0view" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount always uses count", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with maxTimeMS", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "maxTimeMS": 6000 + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "maxTimeMS": 6000 + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount on non-existent collection", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection1", + "expectResult": 0 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll1" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly--command error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "errorCode": 8 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + 
"databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly--socket error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount works correctly on views", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "coll0view" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0View", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "coll0view" + }, + "commandName": "drop", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + }, + "commandName": "create", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll0view" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-clientError.json b/test/crud/unified/find-allowdiskuse-clientError.json new file mode 100644 index 0000000000..5bd954e79d --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-clientError.json @@ -0,0 +1,79 @@ +{ + "description": "find-allowdiskuse-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_clienterror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-serverError.json b/test/crud/unified/find-allowdiskuse-serverError.json new file mode 100644 index 0000000000..dc58f8f0e3 --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-serverError.json @@ -0,0 +1,100 @@ +{ + "description": "find-allowdiskuse-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_servererror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": true + } + } + } + ] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json new file mode 100644 index 0000000000..eb238ab93a --- /dev/null +++ b/test/crud/unified/find-allowdiskuse.json @@ -0,0 +1,120 @@ +{ + "description": "find-allowdiskuse", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse" + } + } + ], + "tests": [ + { + "description": "Find does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": 
"collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json new file mode 100644 index 0000000000..600a3723f1 --- /dev/null +++ b/test/crud/unified/find-comment.json @@ -0,0 +1,403 @@ +{ + "description": "find-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "find with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99", + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment sets comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + 
}, + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-let.json b/test/crud/unified/find-let.json new file mode 100644 index 0000000000..4e9c9c99f4 --- /dev/null +++ b/test/crud/unified/find-let.json @@ -0,0 +1,148 @@ +{ + "description": "find-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Find with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Find with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + }, + "expectError": { + "errorContains": "Unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
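find-comment.json above is largely about getMore: with batchSize 2 and five matching documents, the initial find is followed by two getMore commands, and on 4.4+ the comment must be re-sent on each of them, while pre-4.4 drivers must drop it there. For example (assumptions as before, PyMongo 4.1+ for the comment keyword):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
coll = client["crud-tests"]["coll0"]

# Batches of 2, 2, and 1, so one find plus two getMores; on 4.4+
# the driver attaches the comment to all three commands.
docs = list(coll.find({"_id": {"$gt": 1}}, batch_size=2,
                      comment={"key": "value"}))
print(len(docs))  # 5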
"find": "coll0", + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find.json b/test/crud/unified/find.json new file mode 100644 index 0000000000..275d5d351a --- /dev/null +++ b/test/crud/unified/find.json @@ -0,0 +1,156 @@ +{ + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with multiple batches works", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-comment.json b/test/crud/unified/findOneAndDelete-comment.json new file mode 100644 index 0000000000..6853b9cc2d --- /dev/null +++ b/test/crud/unified/findOneAndDelete-comment.json @@ -0,0 +1,211 @@ +{ + "description": "findOneAndDelete-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": 
"coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-clientError.json b/test/crud/unified/findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..c6ff467866 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "findOneAndDelete-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + 
"_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-serverError.json b/test/crud/unified/findOneAndDelete-hint-serverError.json new file mode 100644 index 0000000000..b874102728 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-serverError.json @@ -0,0 +1,162 @@ +{ + "description": "findOneAndDelete-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-unacknowledged.json b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json new file mode 100644 index 0000000000..077f9892b9 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json @@ -0,0 +1,225 @@ +{ + "description": "findOneAndDelete-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string 
fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint.json b/test/crud/unified/findOneAndDelete-hint.json new file mode 100644 index 0000000000..8b53f2bd3f --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint.json @@ -0,0 +1,155 @@ +{ + "description": "findOneAndDelete-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-let.json b/test/crud/unified/findOneAndDelete-let.json new file mode 100644 index 0000000000..ba8e681c0e --- /dev/null +++ b/test/crud/unified/findOneAndDelete-let.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndDelete-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/findOneAndReplace-comment.json b/test/crud/unified/findOneAndReplace-comment.json new file mode 100644 index 0000000000..f817bb6937 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-comment.json @@ -0,0 +1,234 @@ +{ + "description": "findOneAndReplace-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-dots_and_dollars.json b/test/crud/unified/findOneAndReplace-dots_and_dollars.json new file mode 100644 index 0000000000..19ac447f84 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-dots_and_dollars.json @@ -0,0 +1,430 @@ +{ + "description": "findOneAndReplace-dots_and_dollars", + "schemaVersion": "1.0", + 
"createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + 
"_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-clientError.json b/test/crud/unified/findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..6b07eb1f4d --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-clientError.json @@ -0,0 +1,139 @@ +{ + "description": "findOneAndReplace-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint 
document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-serverError.json b/test/crud/unified/findOneAndReplace-hint-serverError.json new file mode 100644 index 0000000000..7fbf5a0ea3 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-serverError.json @@ -0,0 +1,172 @@ +{ + "description": "findOneAndReplace-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-unacknowledged.json b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json new file mode 100644 index 0000000000..8228d8a2aa --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json @@ -0,0 +1,248 @@ +{ + "description": "findOneAndReplace-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint.json b/test/crud/unified/findOneAndReplace-hint.json new file mode 100644 index 0000000000..d07c5921a7 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint.json @@ -0,0 +1,173 @@ +{ + "description": "findOneAndReplace-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } 
+ ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-let.json b/test/crud/unified/findOneAndReplace-let.json new file mode 100644 index 0000000000..5e5de44b31 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-let.json @@ -0,0 +1,197 @@ +{ + "description": "findOneAndReplace-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "x" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-comment.json b/test/crud/unified/findOneAndUpdate-comment.json new file mode 100644 index 0000000000..6dec5b39ee --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-comment.json @@ -0,0 +1,228 @@ +{ + "description": "findOneAndUpdate-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + 
"_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-dots_and_dollars.json b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json new file mode 100644 index 0000000000..40eb547392 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json @@ -0,0 +1,380 @@ +{ + "description": "findOneAndUpdate-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ 
server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-errorResponse.json b/test/crud/unified/findOneAndUpdate-errorResponse.json new file mode 100644 index 0000000000..5023a450f3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-errorResponse.json @@ -0,0 +1,132 @@ +{ + "description": "findOneAndUpdate-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "unique": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "foo" + } + }, + "upsert": true + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "x": 1 + }, + "keyValue": { + "x": "foo" + } + } + } + } + ] + }, + { + "description": "findOneAndUpdate document validation errInfo is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + 
{ + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "validator": { + "x": { + "$type": "string" + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 121, + "errorResponse": { + "errInfo": { + "failingDocumentId": 1, + "details": { + "$$type": "object" + } + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-clientError.json b/test/crud/unified/findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..d0b51313c9 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-clientError.json @@ -0,0 +1,143 @@ +{ + "description": "findOneAndUpdate-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-serverError.json b/test/crud/unified/findOneAndUpdate-hint-serverError.json new file mode 100644 index 0000000000..99fd9938f8 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-serverError.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndUpdate-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": 
"crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json new file mode 100644 index 0000000000..d116a06d0d --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json @@ -0,0 +1,253 @@ +{ + "description": "findOneAndUpdate-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + 
}, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint.json b/test/crud/unified/findOneAndUpdate-hint.json new file mode 100644 index 0000000000..5be6d2b3e8 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint.json @@ -0,0 +1,181 @@ +{ + "description": "findOneAndUpdate-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": 
{ + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-let.json b/test/crud/unified/findOneAndUpdate-let.json new file mode 100644 index 0000000000..74d7d0e58b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-let.json @@ -0,0 +1,217 @@ +{ + "description": "findOneAndUpdate-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json new file mode 100644 index 
0000000000..2b4c80b3f0 --- /dev/null +++ b/test/crud/unified/insertMany-comment.json @@ -0,0 +1,226 @@ +{ + "description": "insertMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-dots_and_dollars.json b/test/crud/unified/insertMany-dots_and_dollars.json new file mode 100644 index 0000000000..eed8997df9 --- /dev/null +++ b/test/crud/unified/insertMany-dots_and_dollars.json @@ -0,0 +1,338 @@ +{ + "description": "insertMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + 
"collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + 
} + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json new file mode 100644 index 0000000000..dbd83d9f64 --- /dev/null +++ b/test/crud/unified/insertOne-comment.json @@ -0,0 +1,220 @@ +{ + "description": "insertOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-dots_and_dollars.json b/test/crud/unified/insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..fdc17af2e8 --- /dev/null +++ b/test/crud/unified/insertOne-dots_and_dollars.json @@ -0,0 +1,614 @@ +{ + "description": "insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + 
"_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in _id yields server-side error", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": { + "a.b": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with DBRef-like keys", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$db": "foo" + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + 
"object": "collection1", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll1", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-errorResponse.json b/test/crud/unified/insertOne-errorResponse.json new file mode 100644 index 0000000000..04ea6a7451 --- /dev/null +++ b/test/crud/unified/insertOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "insertOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "insert operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json new file mode 100644 index 0000000000..88bee5d7b7 --- /dev/null +++ b/test/crud/unified/replaceOne-comment.json @@ -0,0 +1,248 @@ +{ + "description": "replaceOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + 
"description": "ReplaceOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-dots_and_dollars.json b/test/crud/unified/replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..d5003dc5ea --- /dev/null +++ b/test/crud/unified/replaceOne-dots_and_dollars.json @@ -0,0 +1,567 @@ +{ + "description": "replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + 
"_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] 
+ } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll1", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint-unacknowledged.json b/test/crud/unified/replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..5c5dec64f6 --- /dev/null +++ b/test/crud/unified/replaceOne-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with 
client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint.json b/test/crud/unified/replaceOne-hint.json new file mode 100644 index 0000000000..6926e9d8df --- /dev/null +++ b/test/crud/unified/replaceOne-hint.json @@ -0,0 +1,203 @@ +{ + "description": "replaceOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_replaceone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + }, + { + "description": "ReplaceOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json new file mode 100644 index 0000000000..e7a7ee65a5 --- /dev/null +++ b/test/crud/unified/replaceOne-let.json @@ -0,0 +1,219 @@ +{ + "description": "replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "ReplaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + 
"replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-validation.json b/test/crud/unified/replaceOne-validation.json new file mode 100644 index 0000000000..6f5b173e02 --- /dev/null +++ b/test/crud/unified/replaceOne-validation.json @@ -0,0 +1,82 @@ +{ + "description": "replaceOne-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json new file mode 100644 index 0000000000..88b8b67f5a --- /dev/null +++ b/test/crud/unified/updateMany-comment.json @@ -0,0 +1,254 @@ +{ + "description": "updateMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": 
"crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-dots_and_dollars.json b/test/crud/unified/updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..5d3b9d0453 --- /dev/null +++ b/test/crud/unified/updateMany-dots_and_dollars.json @@ -0,0 +1,404 @@ +{ + "description": "updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-clientError.json 
b/test/crud/unified/updateMany-hint-clientError.json new file mode 100644 index 0000000000..5da878e293 --- /dev/null +++ b/test/crud/unified/updateMany-hint-clientError.json @@ -0,0 +1,159 @@ +{ + "description": "updateMany-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-serverError.json b/test/crud/unified/updateMany-hint-serverError.json new file mode 100644 index 0000000000..c81f36b13c --- /dev/null +++ b/test/crud/unified/updateMany-hint-serverError.json @@ -0,0 +1,216 @@ +{ + "description": "updateMany-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-unacknowledged.json b/test/crud/unified/updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..e83838aac2 --- /dev/null +++ b/test/crud/unified/updateMany-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", 
+ "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint.json b/test/crud/unified/updateMany-hint.json new file mode 100644 index 0000000000..929be52994 --- /dev/null +++ b/test/crud/unified/updateMany-hint.json @@ -0,0 +1,219 @@ +{ + "description": "updateMany-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + 
{ + "description": "UpdateMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json new file mode 100644 index 0000000000..cff3bd4c79 --- /dev/null +++ b/test/crud/unified/updateMany-let.json @@ -0,0 +1,249 @@ +{ + "description": "updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name", + "x": "foo", + "y": "bar" + }, + { + "_id": 3, + "name": "name", + "x": "foo", + "y": "bar" + } + ] + } + ] + }, + { + "description": "updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-validation.json b/test/crud/unified/updateMany-validation.json new file mode 100644 index 0000000000..e3e46a1384 --- /dev/null +++ b/test/crud/unified/updateMany-validation.json @@ -0,0 +1,98 @@ +{ + "description": "updateMany-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany requires atomic modifiers", + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json new file mode 100644 index 0000000000..f4ee74db38 --- /dev/null +++ b/test/crud/unified/updateOne-comment.json @@ -0,0 +1,260 @@ +{ + "description": "updateOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + 
{ + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-dots_and_dollars.json b/test/crud/unified/updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..798d522cba --- /dev/null +++ b/test/crud/unified/updateOne-dots_and_dollars.json @@ -0,0 +1,412 @@ +{ + "description": "updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": 
false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + 
] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-errorResponse.json b/test/crud/unified/updateOne-errorResponse.json new file mode 100644 index 0000000000..0ceddbc4fc --- /dev/null +++ b/test/crud/unified/updateOne-errorResponse.json @@ -0,0 +1,87 @@ +{ + "description": "updateOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "update operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-clientError.json b/test/crud/unified/updateOne-hint-clientError.json new file mode 100644 index 0000000000..d4f1a53430 --- /dev/null +++ b/test/crud/unified/updateOne-hint-clientError.json @@ -0,0 +1,147 @@ +{ + "description": "updateOne-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-serverError.json b/test/crud/unified/updateOne-hint-serverError.json new file mode 100644 index 0000000000..05fb033319 --- /dev/null +++ b/test/crud/unified/updateOne-hint-serverError.json @@ -0,0 +1,208 @@ +{ + "description": "updateOne-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-unacknowledged.json b/test/crud/unified/updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..859b0f92f9 --- /dev/null +++ b/test/crud/unified/updateOne-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + 
"w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint.json b/test/crud/unified/updateOne-hint.json new file mode 100644 index 0000000000..484e00757d --- /dev/null +++ b/test/crud/unified/updateOne-hint.json @@ -0,0 +1,211 @@ +{ + "description": "updateOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + 
"database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json new file mode 100644 index 0000000000..e43b979358 --- /dev/null +++ b/test/crud/unified/updateOne-let.json @@ -0,0 +1,227 @@ +{ + "description": "updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + 
{ + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "UpdateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-validation.json b/test/crud/unified/updateOne-validation.json new file mode 100644 index 0000000000..1464642c59 --- /dev/null +++ b/test/crud/unified/updateOne-validation.json @@ -0,0 +1,80 @@ +{ + "description": "updateOne-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateWithPipelines.json b/test/crud/unified/updateWithPipelines.json new file mode 100644 index 0000000000..164f2f6a19 --- /dev/null +++ b/test/crud/unified/updateWithPipelines.json @@ -0,0 +1,494 @@ +{ + "description": "updateWithPipelines", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateMany using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate using pipelines", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "commandName": "findAndModify", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v1/read/aggregate-collation.json b/test/crud/v1/read/aggregate-collation.json index 85662a442f..d958e447bf 100644 --- a/test/crud/v1/read/aggregate-collation.json +++ b/test/crud/v1/read/aggregate-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Aggregate with collation", diff --git a/test/crud/v1/read/aggregate-out.json b/test/crud/v1/read/aggregate-out.json index 205cf76571..c195e163e0 100644 --- a/test/crud/v1/read/aggregate-out.json +++ b/test/crud/v1/read/aggregate-out.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "2.6", + "serverless": "forbid", "tests": [ { "description": "Aggregate with $out", @@ -41,16 +42,6 @@ } }, "outcome": { - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], "collection": { "name": "other_test_collection", "data": [ @@ -92,16 +83,6 @@ } }, "outcome": { - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], "collection": { "name": "other_test_collection", "data": [ diff --git a/test/crud/v1/read/count-collation.json b/test/crud/v1/read/count-collation.json index 6f75282fe0..7d61508493 100644 --- a/test/crud/v1/read/count-collation.json +++ b/test/crud/v1/read/count-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Count documents with collation", diff --git a/test/crud/v1/read/distinct-collation.json b/test/crud/v1/read/distinct-collation.json index 0af0c67cb7..984991a43b 100644 --- a/test/crud/v1/read/distinct-collation.json +++ b/test/crud/v1/read/distinct-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Distinct with a collation", diff --git a/test/crud/v1/read/find-collation.json 
b/test/crud/v1/read/find-collation.json index 53d0e94900..4e56c05253 100644 --- a/test/crud/v1/read/find-collation.json +++ b/test/crud/v1/read/find-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Find with a collation", diff --git a/test/crud/v1/write/bulkWrite-collation.json b/test/crud/v1/write/bulkWrite-collation.json index 8e9d1bcb1a..bc90aa8172 100644 --- a/test/crud/v1/write/bulkWrite-collation.json +++ b/test/crud/v1/write/bulkWrite-collation.json @@ -22,6 +22,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "BulkWrite with delete operations and collation", diff --git a/test/crud/v1/write/deleteMany-collation.json b/test/crud/v1/write/deleteMany-collation.json index d17bf3bcb9..fce75e488a 100644 --- a/test/crud/v1/write/deleteMany-collation.json +++ b/test/crud/v1/write/deleteMany-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "DeleteMany when many documents match with collation", diff --git a/test/crud/v1/write/deleteOne-collation.json b/test/crud/v1/write/deleteOne-collation.json index 2f7f921130..9bcef411ef 100644 --- a/test/crud/v1/write/deleteOne-collation.json +++ b/test/crud/v1/write/deleteOne-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "DeleteOne when many documents matches with collation", diff --git a/test/crud/v1/write/findOneAndDelete-collation.json b/test/crud/v1/write/findOneAndDelete-collation.json index 1ff37d2e88..32480da842 100644 --- a/test/crud/v1/write/findOneAndDelete-collation.json +++ b/test/crud/v1/write/findOneAndDelete-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndDelete when one document matches with collation", diff --git a/test/crud/v1/write/findOneAndReplace-collation.json b/test/crud/v1/write/findOneAndReplace-collation.json index babb2f7c11..9b3c25005b 100644 --- a/test/crud/v1/write/findOneAndReplace-collation.json +++ b/test/crud/v1/write/findOneAndReplace-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndReplace when one document matches with collation returning the document after modification", diff --git a/test/crud/v1/write/findOneAndUpdate-collation.json b/test/crud/v1/write/findOneAndUpdate-collation.json index 04c1fe73ec..8abab7bd6b 100644 --- a/test/crud/v1/write/findOneAndUpdate-collation.json +++ b/test/crud/v1/write/findOneAndUpdate-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndUpdate when many documents match with collation returning the document before modification", diff --git a/test/crud/v1/write/replaceOne-collation.json b/test/crud/v1/write/replaceOne-collation.json index a668fe7383..fa4cbe9970 100644 --- a/test/crud/v1/write/replaceOne-collation.json +++ b/test/crud/v1/write/replaceOne-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "ReplaceOne when one document matches with collation", diff --git a/test/crud/v1/write/updateMany-collation.json b/test/crud/v1/write/updateMany-collation.json index 3cb49f2298..8becfd806b 100644 --- a/test/crud/v1/write/updateMany-collation.json +++ b/test/crud/v1/write/updateMany-collation.json @@ -14,6 +14,7 @@ } ], 
"minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "UpdateMany when many documents match with collation", diff --git a/test/crud/v1/write/updateOne-collation.json b/test/crud/v1/write/updateOne-collation.json index c49112d519..3afdb83e0f 100644 --- a/test/crud/v1/write/updateOne-collation.json +++ b/test/crud/v1/write/updateOne-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "UpdateOne when one document matches with collation", diff --git a/test/crud/v2/aggregate-merge.json b/test/crud/v2/aggregate-merge.json deleted file mode 100644 index 037ae25d24..0000000000 --- a/test/crud/v2/aggregate-merge.json +++ /dev/null @@ -1,415 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_aggregate_merge", - "tests": [ - { - "description": "Aggregate with $merge", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and batch size of 0", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "batchSize": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "cursor": {} - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and majority readConcern", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "majority" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and local readConcern", - "operations": [ - { - 
"object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "local" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "local" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and available readConcern", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "available" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "available" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/aggregate-out-readConcern.json b/test/crud/v2/aggregate-out-readConcern.json deleted file mode 100644 index c39ee0e281..0000000000 --- a/test/crud/v2/aggregate-out-readConcern.json +++ /dev/null @@ -1,385 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_aggregate_out_readconcern", - "tests": [ - { - "description": "readConcern majority with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "majority" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern local with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "local" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "local" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern available with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "available" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "available" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern linearizable with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "linearizable" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "linearizable" - } - } - } - } - ] - }, - { - "description": "invalid readConcern with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "!invalid123" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "!invalid123" - } - } - } - } - ] - } - ] -} diff --git a/test/crud/v2/bulkWrite-arrayFilters.json b/test/crud/v2/bulkWrite-arrayFilters.json deleted file mode 100644 index be26a337a5..0000000000 --- a/test/crud/v2/bulkWrite-arrayFilters.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.5.6" - } - ], - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ], - "collection_name": "test", - "database_name": "crud-tests", - "tests": [ - { - "description": "BulkWrite with arrayFilters", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { 
- "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - }, - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 3, - "modifiedCount": 3, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - }, - { - "q": {}, - "u": { - "$set": { - "y.$[i].b": 2 - } - }, - "multi": true, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 2 - } - ] - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateWithPipelines.json b/test/crud/v2/updateWithPipelines.json deleted file mode 100644 index 7d20bffb30..0000000000 --- a/test/crud/v2/updateWithPipelines.json +++ /dev/null @@ -1,243 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.11" - } - ], - "data": [ - { - "_id": 1, - "x": 1, - "y": 1, - "t": { - "u": { - "v": 1 - } - } - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ], - "collection_name": "test", - "database_name": "crud-tests", - "tests": [ - { - "description": "UpdateOne using pipelines", - "operations": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - } - }, - { - "description": "UpdateMany using pipelines", - "operations": [ - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate using pipelines", - "operations": [ - { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - 
"findAndModify": "test", - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "command_name": "findAndModify", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - } - } - ] -} diff --git a/test/test_crud_v2.py b/test/crud_v2_format.py similarity index 53% rename from test/test_crud_v2.py rename to test/crud_v2_format.py index 562e119aad..8eadad8430 100644 --- a/test/test_crud_v2.py +++ b/test/crud_v2_format.py @@ -1,4 +1,4 @@ -# Copyright 2019-present MongoDB, Inc. +# Copyright 2020-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,61 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test the collection module.""" +"""v2 format CRUD test runner. -import os -import sys +https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.rst +""" +from __future__ import annotations -sys.path[0:0] = [""] - -from test import unittest -from test.utils import TestCreator from test.utils_spec_runner import SpecRunner -# Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'v2') - -# Default test database and collection names. -TEST_DB = 'testdb' -TEST_COLLECTION = 'testcollection' +class TestCrudV2(SpecRunner): + # Default test database and collection names. + TEST_DB = None + TEST_COLLECTION = None + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + errors += (ValueError,) + return errors -class TestSpec(SpecRunner): def get_scenario_db_name(self, scenario_def): """Crud spec says database_name is optional.""" - return scenario_def.get('database_name', TEST_DB) + return scenario_def.get("database_name", self.TEST_DB) def get_scenario_coll_name(self, scenario_def): """Crud spec says collection_name is optional.""" - return scenario_def.get('collection_name', TEST_COLLECTION) + return scenario_def.get("collection_name", self.TEST_COLLECTION) def get_object_name(self, op): """Crud spec says object is optional and defaults to 'collection'.""" - return op.get('object', 'collection') + return op.get("object", "collection") def get_outcome_coll_name(self, outcome, collection): """Crud spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) + return outcome["collection"].get("name", collection.name) def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" # PYTHON-1935 Only create the collection if there is data to insert. 
- if scenario_def['data']: - super(TestSpec, self).setup_scenario(scenario_def) - - -def create_test(scenario_def, test, name): - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) -test_creator.create_tests() - - -if __name__ == "__main__": - unittest.main() + if scenario_def["data"]: + super().setup_scenario(scenario_def) diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json new file mode 100644 index 0000000000..9a05809f77 --- /dev/null +++ b/test/csot/bulkWrite.json @@ -0,0 +1,160 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "w": 1 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json new file mode 100644 index 0000000000..a8b2b7e170 --- /dev/null +++ b/test/csot/change-streams.json @@ -0,0 +1,598 @@ +{ + "description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": 
"client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to initial aggregate", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 1050 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20, + "batchSize": 2, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { 
+ "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to full resume attempt in a next call", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore", + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 12, + "errorCode": 7, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "change stream can be iterated again if previous iteration times out", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "maxAwaitTimeMS": 1, + "timeoutMS": 100 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + 
} + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 10 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json new file mode 100644 index 0000000000..1361971c4c --- /dev/null +++ b/test/csot/close-cursors.json @@ -0,0 +1,239 @@ +{ + "description": "timeoutMS behaves correctly when closing cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, 
+ { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "close", + "object": "cursor", + "arguments": { + "timeoutMS": 40 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json new file mode 100644 index 0000000000..f0858791e9 --- /dev/null +++ b/test/csot/command-execution.json @@ -0,0 +1,394 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "topologies": [ + "single", + "replicaset", + "sharded-replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 75 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 4 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": 
"timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json new file mode 100644 index 0000000000..0c8cc6edd9 --- /dev/null +++ b/test/csot/convenient-transactions.json @@ -0,0 +1,191 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/cursors.json b/test/csot/cursors.json new file mode 100644 index 0000000000..36949d7509 --- /dev/null +++ b/test/csot/cursors.json @@ -0,0 +1,113 @@ +{ + "description": "tests for timeoutMS behavior that applies to all cursor types", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client" + } + }, + { + 
"database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "find errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "database aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listCollections errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listIndexes errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json new file mode 100644 index 0000000000..9c9b9a2288 --- /dev/null +++ b/test/csot/deprecated-options.json @@ -0,0 +1,7179 @@ +{ + "description": "operations ignore deprecated timeout options if timeoutMS is set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + 
"object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": 
{ + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": 
"countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is 
ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + 
"entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { 
+ "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + 
"id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { 
+ "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + 
"bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + 
"name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + 
"arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } 
+ } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + 
"id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, 
+ { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + 
}, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", 
+ "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + 
"id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + 
} + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + 
"id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + 
"database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } 
+ }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + 
}, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json new file mode 100644 index 0000000000..4d9e061c3b --- /dev/null +++ b/test/csot/error-transformations.json @@ -0,0 +1,181 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json new file mode 100644 index 0000000000..34854ac155 --- /dev/null +++ b/test/csot/global-timeoutMS.json @@ -0,0 +1,5830 @@ +{ + "description": "timeoutMS can be 
configured on a MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + 
"entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + 
] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ 
+ { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 
+ }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + 
"database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { 
+ "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + 
"blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + 
"databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": 
"testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + 
"isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json new file mode 100644 index 0000000000..c6c0944d2f --- /dev/null +++ b/test/csot/gridfs-advanced.json @@ -0,0 +1,385 @@ +{ + "description": "timeoutMS behaves correctly for advanced GridFS API operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { 
+ "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo", + "timeoutMS": 2000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to update during a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "arguments": { + "timeoutMS": 2000 + } + } + ] + }, + { + "description": "timeoutMS applied to files collection drop", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop", + "databaseName": "test", + "command": { + "drop": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to chunks collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to drop as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json new file mode 100644 index 0000000000..9f4980114b --- /dev/null +++ b/test/csot/gridfs-delete.json @@ -0,0 +1,285 @@ +{ + "description": "timeoutMS behaves correctly for GridFS delete operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + 
} + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for delete", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to delete against the files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to delete against the chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to entire delete, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json new file mode 100644 index 0000000000..8542f69e89 --- /dev/null +++ b/test/csot/gridfs-download.json @@ -0,0 +1,359 @@ +{ + "description": "timeoutMS behaves correctly for GridFS download operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": 
"chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for download", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to find to get files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find to get chunks", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to entire download, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json new file mode 100644 index 0000000000..7409036284 --- /dev/null +++ b/test/csot/gridfs-find.json @@ -0,0 +1,183 @@ +{ + "description": "timeoutMS behaves correctly for GridFS find operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {}, + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find command", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": 
"fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json new file mode 100644 index 0000000000..b3f174973d --- /dev/null +++ b/test/csot/gridfs-upload.json @@ -0,0 +1,409 @@ +{ + "description": "timeoutMS behaves correctly for GridFS upload operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for upload", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to initial find on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + 
"filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to chunk insertion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to creation of files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to upload as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json new file mode 100644 index 0000000000..3a2d2eaefb --- /dev/null +++ b/test/csot/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": 
"coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { 
+ "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json new file mode 100644 index 0000000000..0a5448a6bb --- /dev/null +++ b/test/csot/non-tailable-cursors.json @@ -0,0 +1,541 @@ +{ + "description": "timeoutMS behaves correctly for non-tailable cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "collectionName": "aggregateOutputColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + 
"command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is unset", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find if timeoutMode is iteration", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 
15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "timeoutMS": 20, + "batchSize": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $out errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$out": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "aggregate with $merge errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$merge": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + } + ] +} diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json new file mode 100644 index 0000000000..896b996ee8 --- /dev/null +++ b/test/csot/override-operation-timeoutMS.json @@ -0,0 +1,3577 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": 
"coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": 
"database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + 
"maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + 
"x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + 
"blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": 
"test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 
for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json new file mode 100644 index 0000000000..63e8efccfc --- /dev/null +++ b/test/csot/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds 
after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + 
"description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } 
+ ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { 
+ "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + 
"update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + 
"failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + 
"failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive 
socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive 
socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - 
estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json new file mode 100644 index 0000000000..642eca0ee9 --- /dev/null +++ b/test/csot/retryability-timeoutMS.json @@ -0,0 +1,5439 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", 
+ "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": 
"deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 
1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, 
+ { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ 
+ "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "operations": [ + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + 
"RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + 
} + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + 
} + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + 
"errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is 
retried multiple times for non-zero timeoutMS - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + 
"errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + 
"long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + 
} + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 
7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + 
"command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": 
"timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + 
"errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", 
+ "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json new file mode 100644 index 0000000000..8205c086bc --- /dev/null +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS 
applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-operation-timeoutMS.json 
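
The two session files here form a matched pair: sessions-inherit-timeoutMS (above) asserts that a ClientSession picks up timeoutMS=50 from its parent MongoClient, so commitTransaction, abortTransaction, and the withTransaction callback all carry maxTimeMS and surface a timeout error when the fail point blocks for 60 ms, while sessions-override-operation-timeoutMS (below) asserts that a timeoutMS argument passed to the individual session operation takes precedence over the client setting. As a rough illustration of the inherited-budget case only, here is a minimal PyMongo sketch; it assumes PyMongo 4.2+ (the first release with CSOT support) and a reachable replica set named "rs", and the URI, collection name, and 50 ms budget mirror the fixtures but are otherwise illustrative:

```python
from pymongo import MongoClient
from pymongo.errors import PyMongoError

# timeoutMS set on the client is inherited by sessions started from it.
client = MongoClient("mongodb://localhost:27017/?replicaSet=rs&timeoutMS=50")
coll = client.test.coll

try:
    with client.start_session() as session:
        with session.start_transaction():
            coll.insert_one({"_id": 1}, session=session)
        # commitTransaction runs as the with-block exits and must complete
        # within the inherited 50 ms budget.
except PyMongoError as exc:
    # PyMongo 4.2+ marks CSOT failures via the .timeout property, the
    # driver-side counterpart of the fixtures' "isTimeoutError": true.
    print("timed out:", exc.timeout)
```
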
b/test/csot/sessions-override-operation-timeoutMS.json new file mode 100644 index 0000000000..ff26de29f5 --- /dev/null +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -0,0 +1,315 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + 
] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json new file mode 100644 index 0000000000..1d3b8932af --- /dev/null +++ b/test/csot/sessions-override-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 50 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + 
"commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json new file mode 100644 index 0000000000..6da85c7783 --- /dev/null +++ b/test/csot/tailable-awaitData.json @@ -0,0 +1,422 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + 
"arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": 
"tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json new file mode 100644 index 0000000000..34ee660963 --- /dev/null +++ b/test/csot/tailable-non-awaitData.json @@ -0,0 +1,312 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailable" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/aggregate.json b/test/data_lake/aggregate.json new file mode 100644 index 0000000000..99995bca41 --- /dev/null +++ b/test/data_lake/aggregate.json @@ -0,0 +1,53 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + 
"description": "Aggregate with pipeline (project, sort, limit)", + "operations": [ + { + "object": "collection", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + }, + { + "$sort": { + "a": 1 + } + }, + { + "$limit": 2 + } + ] + }, + "result": [ + { + "a": 1, + "b": 2, + "c": 3 + }, + { + "a": 2, + "b": 3, + "c": 4 + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "driverdata" + } + } + } + ] + } + ] +} diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json new file mode 100644 index 0000000000..997a3ab3fc --- /dev/null +++ b/test/data_lake/estimatedDocumentCount.json @@ -0,0 +1,27 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "estimatedDocumentCount succeeds", + "operations": [ + { + "object": "collection", + "name": "estimatedDocumentCount", + "result": 15 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "count": "driverdata" + }, + "command_name": "count", + "database_name": "test" + } + } + ] + } + ] +} diff --git a/test/data_lake/find.json b/test/data_lake/find.json new file mode 100644 index 0000000000..8a3468a135 --- /dev/null +++ b/test/data_lake/find.json @@ -0,0 +1,65 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "Find with projection and sort", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": { + "b": { + "$gt": 5 + } + }, + "projection": { + "_id": 0 + }, + "sort": { + "a": 1 + }, + "limit": 5 + }, + "result": [ + { + "a": 5, + "b": 6, + "c": 7 + }, + { + "a": 6, + "b": 7, + "c": 8 + }, + { + "a": 7, + "b": 8, + "c": 9 + }, + { + "a": 8, + "b": 9, + "c": 10 + }, + { + "a": 9, + "b": 10, + "c": 11 + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "driverdata" + } + } + } + ] + } + ] +} diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json new file mode 100644 index 0000000000..e2e1d4788a --- /dev/null +++ b/test/data_lake/getMore.json @@ -0,0 +1,57 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "A successful find event with getMore", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "driverdata", + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "command_name": "find", + "database_name": "test" + } + }, + { + "command_started_event": { + "command": { + "batchSize": 1 + }, + "command_name": "getMore", + "database_name": "cursors" + } + } + ] + } + ] +} diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json new file mode 100644 index 0000000000..e419f7b3e9 --- /dev/null +++ b/test/data_lake/listCollections.json @@ -0,0 +1,25 @@ +{ + "database_name": "test", + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "name": "listCollections", + "object": "database" + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "listCollections", + "database_name": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] +} diff --git a/test/data_lake/listDatabases.json 
b/test/data_lake/listDatabases.json new file mode 100644 index 0000000000..6458148e49 --- /dev/null +++ b/test/data_lake/listDatabases.json @@ -0,0 +1,24 @@ +{ + "tests": [ + { + "description": "ListDatabases succeeds", + "operations": [ + { + "name": "listDatabases", + "object": "client" + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "listDatabases", + "database_name": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] +} diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json new file mode 100644 index 0000000000..d81ff1a64b --- /dev/null +++ b/test/data_lake/runCommand.json @@ -0,0 +1,31 @@ +{ + "database_name": "test", + "tests": [ + { + "description": "ping succeeds using runCommand", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "ping", + "database_name": "test", + "command": { + "ping": 1 + } + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/errors/error_handling_handshake.json b/test/discovery_and_monitoring/errors/error_handling_handshake.json new file mode 100644 index 0000000000..56ca7d1132 --- /dev/null +++ b/test/discovery_and_monitoring/errors/error_handling_handshake.json @@ -0,0 +1,113 @@ +{ + "description": "Network timeouts before and after the handshake completes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore network timeout application error (afterHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Mark server unknown on network timeout application error (beforeHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-error.json b/test/discovery_and_monitoring/errors/non-stale-network-error.json new file mode 100644 index 
0000000000..c22a47dc8a --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-error.json @@ -0,0 +1,80 @@ +{ + "description": "Non-stale network error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json new file mode 100644 index 0000000000..03dc5b66c9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json @@ -0,0 +1,88 @@ +{ + "description": "Non-stale network timeout error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network timeout error does not mark server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json new file mode 100644 index 0000000000..777e703a3c --- /dev/null +++ 
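
The SDAM error fixtures in this group all follow one template: phase 1 discovers primary a:27017 with topologyVersion counter 1, then phase 2 injects an application error and checks whether the server is marked Unknown and whether the connection pool generation is bumped. The two files directly above capture the key contrast: a non-timeout network error after the handshake marks the server Unknown and clears the pool (generation 1), while a network timeout after the handshake is ignored, since it usually indicates a slow operation rather than a dead server. These transitions can be watched from PyMongo through its SDAM monitoring API; a minimal sketch (the listener API and event fields are real; the URI and output format are illustrative):

```python
from pymongo import MongoClient, monitoring

class ServerLogger(monitoring.ServerListener):
    def opened(self, event):
        print("opened:", event.server_address)

    def description_changed(self, event):
        # e.g. "RSPrimary -> Unknown" after a non-stale network error.
        print(event.server_address,
              event.previous_description.server_type_name, "->",
              event.new_description.server_type_name)

    def closed(self, event):
        print("closed:", event.server_address)

client = MongoClient("mongodb://localhost:27017/?replicaSet=rs",
                     event_listeners=[ServerLogger()])
client.admin.command("ping")
```
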
b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..c4aa7fb71b --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + 
}, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json new file mode 100644 index 0000000000..2a9bc8a5cf --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..638aa306cb --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..f327954a9d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json new file mode 100644 index 0000000000..0ac02fb19b --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json @@ -0,0 +1,100 @@ +{ + "description": 
"Non-stale topologyVersion greater NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json new file mode 100644 index 0000000000..daf2a7e8e1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + 
"$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json new file mode 100644 index 0000000000..a7d9e1fe24 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json new file mode 100644 index 0000000000..2c59e785ab --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + 
"description": "Non-stale topologyVersion missing InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..f2cb834e83 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json new file mode 100644 index 0000000000..095128d615 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..3d7312d4a5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..a457ba3072 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + 
"$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json new file mode 100644 index 0000000000..b7427a3f3d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json new file mode 100644 index 0000000000..8146a60d6e --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + 
"minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json new file mode 100644 index 0000000000..c7597007d7 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json new file mode 100644 index 0000000000..8448c60599 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + 
"description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..9d601c4ede --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json new file mode 100644 index 0000000000..8be833f104 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..f2f94c0d00 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + 
"generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..6d3b397566 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json new file mode 100644 index 0000000000..332ddf5ec1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json @@ -0,0 +1,100 @@ +{ + "description": 
"Non-stale topologyVersion proccessId changed NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json new file mode 100644 index 0000000000..c22a537f58 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { 
+ "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json new file mode 100644 index 0000000000..eaaab79273 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json new file mode 100644 index 0000000000..40c4ed6c80 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + 
"response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..5c489f5ecb --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..f0851b299e --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json 
new file mode 100644 index 0000000000..a675f0ca54 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..ea9bf1d16b --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json new file mode 100644 index 0000000000..10211fca70 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + 
"servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json new file mode 100644 index 0000000000..fa98d0bf06 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json new file mode 100644 index 0000000000..cd587205b6 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + 
"servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json new file mode 100644 index 0000000000..9f6ea212e5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..7e5f235713 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..1635f1a856 --- /dev/null 
+++ b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..0e70ede02c --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..3fefb21663 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": 
"RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json new file mode 100644 index 0000000000..d010da0a5b --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json new file mode 100644 index 0000000000..02956d201d --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + 
"outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json new file mode 100644 index 0000000000..fc3a5aa6fe --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json @@ -0,0 +1,70 @@ +{ + "description": "Pre-4.2 ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json new file mode 100644 index 0000000000..eb00b69613 --- /dev/null +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -0,0 +1,131 @@ +{ + "description": "Do not check errmsg when code exists", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"not master\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "not master", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, 
+ { + "description": "errmsg \"node is recovering\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "node is recovering", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json new file mode 100644 index 0000000000..2f7c7fd13b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + 
"outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..b0b51ef676 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + 
"topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..b68e23b7a7 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json new file mode 
100644 index 0000000000..d9b3562654 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json new file mode 100644 index 0000000000..90889356dd --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + 
"responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json new file mode 100644 index 0000000000..0a707a1c07 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + 
"outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json new file mode 100644 index 0000000000..5da3413d5b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, 
+ "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..d29310fb61 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + 
"outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..376bb93770 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..990fc45e4e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation LegacyNotPrimary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + 
"hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..1744a82f77 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } 
+ ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..57ca1cf158 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } 
+ }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json new file mode 100644 index 0000000000..995453c82b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale 
generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..bf4c85d24f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation PrimarySteppedDown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 
189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..9374900e06 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + 
"topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json new file mode 100644 index 0000000000..f5d01b6540 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation network error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale network error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json new file mode 100644 
index 0000000000..fa84343b0b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation timeout error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale timeout error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..72fac9a86e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + 
"hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..3c713592a3 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + 
"counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..257b6ec6fb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation LegacyNotPrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..dcb5716f44 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": 
"rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..58cefafae9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + 
], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json new file mode 100644 index 0000000000..c92b01e054 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + 
"setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..62759b6ad9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation PrimarySteppedDown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + 
"minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..4661632c4f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + 
"type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json new file mode 100644 index 0000000000..15b044fc73 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation network error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale network error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json new file mode 100644 index 0000000000..acbb9e581e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation timeout error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale timeout error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": 
"1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json new file mode 100644 index 0000000000..f2207a04d5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..4387451ce6 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": 
[ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json new file mode 100644 index 0000000000..8c0cf00f22 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": 
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..99a828326c --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + 
"counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..ba2ea87106 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + 
"type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json new file mode 100644 index 0000000000..8edd317a73 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json new file mode 100644 index 0000000000..da8e4755eb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion 
PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json new file mode 100644 index 0000000000..aa252e1dc4 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + 
"topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/write_errors_ignored.json b/test/discovery_and_monitoring/errors/write_errors_ignored.json new file mode 100644 index 0000000000..b588807e08 --- /dev/null +++ b/test/discovery_and_monitoring/errors/write_errors_ignored.json @@ -0,0 +1,98 @@ +{ + "description": "writeErrors field is ignored", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore command error with writeErrors field", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 1, + "writeErrors": [ + { + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "index": 0 + } + ] + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": 
"rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json new file mode 100644 index 0000000000..d2e34478e6 --- /dev/null +++ b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json @@ -0,0 +1,28 @@ +{ + "description": "Load balancer can be discovered and only has the address property set", + "uri": "mongodb://a/?loadBalanced=true", + "phases": [ + { + "outcome": { + "servers": { + "a:27017": { + "type": "LoadBalancer", + "setName": null, + "setVersion": null, + "electionId": null, + "logicalSessionTimeoutMinutes": null, + "minWireVersion": null, + "maxWireVersion": null, + "topologyVersion": null + } + }, + "topologyType": "LoadBalanced", + "setName": null, + "logicalSessionTimeoutMinutes": null, + "maxSetVersion": null, + "maxElectionId": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index d670770f6d..444b13e9d5 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +23,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json new file mode 100644 index 0000000000..cf92dd1ed3 --- /dev/null +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -0,0 +1,40 @@ +{ + "description": "Replica set member and an unknown server", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index ced7baeb65..53709b0cee 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -1,6 +1,6 @@ { - "description": "Discover arbiters", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover arbiters with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json new file mode 100644 index 0000000000..64fb49f4fc --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -0,0 +1,42 @@ +{ + "description": "Discover arbiters with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + 
"isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "arbiters": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json new file mode 100644 index 0000000000..2e24c83e0b --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -0,0 +1,32 @@ +{ + "description": "Discover ghost with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/ghost_discovered.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json similarity index 83% rename from test/discovery_and_monitoring/rs/ghost_discovered.json rename to test/discovery_and_monitoring/rs/discover_ghost_replicaset.json index bf22cbb0eb..cf5fe83a54 100644 --- a/test/discovery_and_monitoring/rs/ghost_discovered.json +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -1,5 +1,5 @@ { - "description": "Ghost discovered", + "description": "Discover ghost with replicaSet URI option", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { @@ -8,7 +8,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json new file mode 100644 index 0000000000..e4a90f1f9c --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -0,0 +1,46 @@ +{ + "description": "Discover hidden with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json new file mode 100644 index 0000000000..04420596f0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -0,0 +1,46 @@ +{ + "description": "Discover hidden with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, 
+ "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index e46249d668..30258409f6 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -1,6 +1,6 @@ { - "description": "Discover passives", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover passives with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -43,7 +44,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "passive": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json new file mode 100644 index 0000000000..266eaa5234 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -0,0 +1,80 @@ +{ + "description": "Discover passives with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "passive": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index ea2cce9b72..2d1292bbd4 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -1,6 +1,6 @@ { - "description": "Replica set discovery from primary", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover primary with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json 
b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json new file mode 100644 index 0000000000..54dfefba5f --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -0,0 +1,40 @@ +{ + "description": "Discover primary with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json new file mode 100644 index 0000000000..4ab25667f0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -0,0 +1,45 @@ +{ + "description": "Discover RSOther with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": false, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/rsother_discovered.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json similarity index 86% rename from test/discovery_and_monitoring/rs/rsother_discovered.json rename to test/discovery_and_monitoring/rs/discover_rsother_replicaset.json index c575501d80..e3958d70ad 100644 --- a/test/discovery_and_monitoring/rs/rsother_discovered.json +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -1,5 +1,5 @@ { - "description": "RSOther discovered", + "description": "Discover RSOther with replicaSet URI option", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ @@ -24,7 +25,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": false, "hosts": [ "c:27017", diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index 7210b3845c..22325d4e03 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -1,6 +1,6 @@ { - "description": "Replica set discovery from secondary", - "uri": "mongodb://b/?replicaSet=rs", + "description": "Discover secondary with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json 
b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json new file mode 100644 index 0000000000..d903b6444d --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -0,0 +1,41 @@ +{ + "description": "Discover secondary with replicaSet URI option", + "uri": "mongodb://b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json index 57ed568e3b..50e1269223 100644 --- a/test/discovery_and_monitoring/rs/discovery.json +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -47,7 +48,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "d:27017", @@ -91,7 +93,8 @@ "d:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017", @@ -134,7 +137,8 @@ "c:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..2fcea2bf66 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index 8a5aa8cd67..17df3207fa 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +27,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -60,7 +62,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } } ] diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json index a67db57d0c..4e02304c61 100644 --- a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json new file mode 100644 index 0000000000..f0539cb337 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -0,0 +1,56 @@ +{ + "description": "Incompatible arbiter", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "arbiterOnly": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json new file mode 100644 index 0000000000..824e953f90 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -0,0 +1,51 @@ +{ + "description": "Incompatible ghost", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json new file mode 100644 index 0000000000..6f301ef5de --- /dev/null +++ 
b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -0,0 +1,56 @@ +{ + "description": "Incompatible other", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "hidden": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSOther", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json index 6860742c9e..96389d3b76 100644 --- a/test/discovery_and_monitoring/rs/ls_timeout.json +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -53,7 +54,8 @@ "d:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 @@ -90,7 +92,8 @@ "e:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "hosts": [ "a:27017", "b:27017", @@ -136,7 +139,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", @@ -184,7 +188,8 @@ "c:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "setName": "rs", "hidden": true, "logicalSessionTimeoutMinutes": 1, @@ -226,7 +231,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json index 336acff023..0e2c2c462e 100644 --- a/test/discovery_and_monitoring/rs/member_reconfig.json +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -41,7 +42,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json index a97dfabf52..0756003a89 100644 --- a/test/discovery_and_monitoring/rs/member_standalone.json +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -8,7 +8,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -32,7 +33,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index eb73b304bd..ed1a6245f9 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + 
"helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -41,7 +42,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index cd6c37cef7..ccb3a41f75 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -41,7 +42,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -50,7 +55,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -83,7 +89,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -92,7 +102,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -125,7 +136,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index c5828171d4..415a0f66aa 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -41,7 +42,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -50,7 +55,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -83,7 +89,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -92,7 +102,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -125,7 +136,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } } ] diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json index 7be79d2d3c..d7b19cfe8f 100644 --- a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" 
@@ -41,7 +42,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/non_rs_member.json b/test/discovery_and_monitoring/rs/non_rs_member.json index 907c1651e0..538077ef09 100644 --- a/test/discovery_and_monitoring/rs/non_rs_member.json +++ b/test/discovery_and_monitoring/rs/non_rs_member.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json index 4d0b0ae629..96a944f0c3 100644 --- a/test/discovery_and_monitoring/rs/normalize_case.json +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "A:27017" diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json index e854e7fb43..ab1720cefc 100644 --- a/test/discovery_and_monitoring/rs/normalize_case_me.json +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "me": "A:27017", "hosts": [ @@ -51,7 +52,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "me": "B:27017", diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json new file mode 100644 index 0000000000..f1fa2e252e --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -0,0 +1,203 @@ +{ + "description": "Pre 6.0 Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + 
"isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index d4348df442..8a99a78475 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -17,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -42,7 +43,8 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1 } }, { @@ -51,7 +53,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -63,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -90,7 +93,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -99,7 +106,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -108,23 +116,26 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -133,7 +144,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + 
"maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -142,7 +157,8 @@ "c:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -154,23 +170,26 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -179,7 +198,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json new file mode 100644 index 0000000000..9c54b39856 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -0,0 +1,61 @@ +{ + "description": "Primary becomes ghost", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json new file mode 100644 index 0000000000..ac416e57d5 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -0,0 +1,56 @@ +{ + "description": "Primary becomes mongos", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json index e35c75f4bc..a64524d0ca 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json +++ 
b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json index d008326123..bf70ca3014 100644 --- a/test/discovery_and_monitoring/rs/primary_changes_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json index 271ca5874e..3db854f085 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index e81f299086..3a80b150fe 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +27,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -59,7 +61,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -84,7 +90,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -93,7 +103,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -123,7 +134,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -132,7 +147,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -165,7 +181,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } } }, { @@ -174,7 +194,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", @@ -203,7 +224,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index 
d0e55c545a..32e03fb7d4 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +27,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -59,7 +61,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -84,7 +90,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -93,7 +103,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -123,7 +134,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -132,7 +147,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -165,7 +181,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -174,7 +194,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", @@ -203,7 +224,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json index 806fda37c3..bc02cc9571 100644 --- a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "me": "c:27017", "hosts": [ @@ -39,7 +40,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "me": "b:27017", "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json index 8d18a6971f..2d2c0f40d8 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -26,7 +26,8 @@ "a:27017", "b:27017" ], - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "ok": 1, "setName": "rs", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json new file mode 100644 index 0000000000..4c40093659 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -0,0 +1,79 @@ +{ + "description": "Primary 
mismatched me is not removed", + "uri": "mongodb://localhost:27017,localhost:27018/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "localhost:27017", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "a:27017", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "localhost:27018", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "localhost:27018", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json index 6ed55ab3d1..ac0d9374f0 100644 --- a/test/discovery_and_monitoring/rs/primary_reports_new_member.json +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -42,7 +43,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -75,7 +77,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -113,7 +116,8 @@ "c:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "b:27017", diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json index fdb250ffef..6dbd73dadc 100644 --- a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -42,7 +43,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "c:27017", "d:27017" diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json index eda4787173..cc0691fb8c 100644 --- a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json new file mode 100644 index 0000000000..610aeae0ac --- /dev/null +++ b/test/discovery_and_monitoring/rs/repeated.json @@ 
-0,0 +1,144 @@ +{ + "description": "Repeated isWritablePrimary response must be processed", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json new file mode 100644 index 0000000000..3148e1c141 --- /dev/null +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -0,0 +1,26 @@ +{ + "description": "replicaSet URI option causes starting topology to be RSNP", + "uri": "mongodb://a/?replicaSet=rs&directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json index dd3562d7fc..87a66d9e72 100644 --- a/test/discovery_and_monitoring/rs/response_from_removed.json +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" @@ -36,7 +37,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json 
b/test/discovery_and_monitoring/rs/sec_not_auth.json index 7d5e700035..a39855e654 100644 --- a/test/discovery_and_monitoring/rs/sec_not_auth.json +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +23,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json new file mode 100644 index 0000000000..054425c84c --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -0,0 +1,83 @@ +{ + "description": "Pre 6.0 New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json new file mode 100644 index 0000000000..ee9519930b --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -0,0 +1,83 @@ +{ + "description": "Secondary ignored when ok is zero", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json 
b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index d2a70f6788..6f1b9b5986 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -1,5 +1,6 @@ { "description": "Secondary mismatched me", + "uri": "mongodb://localhost:27017/?replicaSet=rs", "phases": [ { "outcome": { @@ -26,7 +27,8 @@ "a:27017", "b:27017" ], - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "ok": 1, "setName": "rs", "minWireVersion": 0, @@ -35,6 +37,5 @@ ] ] } - ], - "uri": "mongodb://localhost:27017/?replicaSet=rs" + ] } diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json index 4c132b633e..8d2f152f59 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json index 73cbab7c5d..b7ef2d6d6a 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -41,7 +42,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..1cc608a344 --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,147 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..3669511c5a --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..97870d71d5 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": 
null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..c2e2fe5b9b --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -0,0 +1,84 @@ +{ + "description": "Pre 6.0 setVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index dbd9765d2f..256fafe108 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -16,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -36,7 +37,8 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2 } }, { @@ -45,7 +47,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -53,27 +56,28 @@ 
"setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2 } } ] diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json index 39a4f532dd..e9075f97f2 100644 --- a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json index 945145af88..0433d27a36 100644 --- a/test/discovery_and_monitoring/rs/too_new.json +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +23,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 3f9eadc4bc..461d00acc4 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +23,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/topology_version_equal.json b/test/discovery_and_monitoring/rs/topology_version_equal.json new file mode 100644 index 0000000000..d3baa13479 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_equal.json @@ -0,0 +1,101 @@ +{ + "description": "Primary with equal topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": 
{ + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json new file mode 100644 index 0000000000..f296ccee62 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -0,0 +1,259 @@ +{ + "description": "Primary with newer topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "c:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null + }, + "d:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "e:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_less.json b/test/discovery_and_monitoring/rs/topology_version_less.json new file mode 100644 index 0000000000..435337ff25 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_less.json @@ -0,0 +1,97 @@ +{ + "description": "Primary with older topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json index 95c7aa9dce..cc19a961f2 100644 --- a/test/discovery_and_monitoring/rs/unexpected_mongos.json +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -8,7 +8,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json new file mode 100644 index 
0000000000..5c58b65614 --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -0,0 +1,138 @@ +{ + "description": "Pre 6.0 Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 19e1727bf3..551f3e12c2 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -19,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -41,7 +42,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -50,7 +55,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -58,26 +64,33 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": 
"rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -86,7 +99,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -97,26 +111,33 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json index 45be2f502b..9654ff7b79 100644 --- a/test/discovery_and_monitoring/rs/wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -8,7 +8,8 @@ "b:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "b:27017", diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index 3dae1f7ea1..e531db97f9 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 1000 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json new file mode 100644 index 0000000000..9e877a0840 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -0,0 +1,31 @@ +{ + "description": "Discover single mongos", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json index 96f8dec17a..93fa398d52 100644 --- a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -19,7 +20,8 @@ "b:27017", { "ok": 
1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, @@ -49,7 +51,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -60,7 +63,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json index 04015694a8..50a93eda5f 100644 --- a/test/discovery_and_monitoring/sharded/mongos_disconnect.json +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -70,7 +72,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json index 6e60fd05c7..311592d715 100644 --- a/test/discovery_and_monitoring/sharded/multiple_mongoses.json +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json index 58cf7c07d7..d74375ebbf 100644 --- a/test/discovery_and_monitoring/sharded/non_mongos_removed.json +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json index 9521e11789..4b997d2163 100644 --- a/test/discovery_and_monitoring/sharded/too_new.json +++ b/test/discovery_and_monitoring/sharded/too_new.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 999, "maxWireVersion": 1000 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json index 6bd187f61d..688e1db0f5 100644 --- a/test/discovery_and_monitoring/sharded/too_old.json +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, "maxWireVersion": 
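
Across the sharded fixtures, `"msg": "isdbgrid"` is what identifies a mongos, and non_mongos_removed shows any other server type being dropped from a Sharded topology. A simplified classifier covering just the response shapes these outcomes exercise (real drivers distinguish more types, e.g. RSGhost):

```python
def server_type(hello):
    # Simplified: covers only the cases appearing in these fixtures.
    if not hello.get("ok"):
        return "Unknown"
    if hello.get("msg") == "isdbgrid":
        return "Mongos"
    if hello.get("setName"):
        if hello.get("isWritablePrimary"):
            return "RSPrimary"
        if hello.get("secondary"):
            return "RSSecondary"
        if hello.get("arbiterOnly"):
            return "RSArbiter"
        return "RSOther"
    return "Standalone"
```
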
6 @@ -18,7 +19,8 @@ "b:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index ee6b847ade..302927598c 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index 4458150186..90676a8f9b 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -1,6 +1,6 @@ { "description": "Direct connection to RSPrimary via external IP", - "uri": "mongodb://a", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index a7fa079490..25fe965185 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -1,6 +1,6 @@ { - "description": "Connect to mongos", - "uri": "mongodb://a", + "description": "Direct connection to mongos", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json new file mode 100644 index 0000000000..cd8660888a --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -0,0 +1,32 @@ +{ + "description": "Direct connection with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs&directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index 3ef374d6f1..e204956056 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSArbiter", - "uri": "mongodb://a", + "description": "Direct connection to RSArbiter", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "arbiterOnly": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json 
b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index bd5aaf7f04..409e8502b3 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSPrimary", - "uri": "mongodb://a", + "description": "Direct connection to RSPrimary", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 3b4f3c8c5a..305f283b52 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSSecondary", - "uri": "mongodb://a", + "description": "Direct connection to RSSecondary", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index 2ecff9b9ae..b47278482a 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -1,6 +1,6 @@ { - "description": "Connect to standalone", - "uri": "mongodb://a", + "description": "Direct connection to standalone", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/unavailable_seed.json b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json similarity index 78% rename from test/discovery_and_monitoring/single/unavailable_seed.json rename to test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json index e9cce02ebf..16f2735da5 100644 --- a/test/discovery_and_monitoring/single/unavailable_seed.json +++ b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json @@ -1,6 +1,6 @@ { - "description": "Unavailable seed", - "uri": "mongodb://a", + "description": "Direct connection to unavailable seed", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json new file mode 100644 index 0000000000..71080e6810 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -0,0 +1,65 @@ +{ + "description": "Direct connection to RSPrimary with wrong set name", + "uri": "mongodb://a/?directConnection=true&replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown" + } + }, + "topologyType": 
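
The renamed single-topology fixtures swap the bare `mongodb://a` seed for an explicit `directConnection=true`, while the new discover_* files cover `directConnection=false`. In PyMongo the option is accepted both in the URI and as a keyword (hosts here are the fixtures' placeholder names):

```python
from pymongo import MongoClient

# Pin the client to a single server and skip topology discovery:
direct = MongoClient("mongodb://a:27017/?directConnection=true")
# Equivalent keyword form:
direct_kw = MongoClient("a", 27017, directConnection=True)
# Opt in to discovery from a single seed:
discovered = MongoClient("mongodb://a:27017/?directConnection=false")
```
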
"Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json new file mode 100644 index 0000000000..858cbdaf63 --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -0,0 +1,31 @@ +{ + "description": "Discover standalone", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_unavailable_seed.json b/test/discovery_and_monitoring/single/discover_unavailable_seed.json new file mode 100644 index 0000000000..b1f306c2be --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_unavailable_seed.json @@ -0,0 +1,25 @@ +{ + "description": "Discover unavailable seed", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json index ae6c8ba11b..87b3e4e8a1 100644 --- a/test/discovery_and_monitoring/single/ls_timeout_standalone.json +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json index 06f71305dc..8e7c2a10e3 100644 --- a/test/discovery_and_monitoring/single/not_ok_response.json +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -1,5 +1,5 @@ { - "description": "Handle a not-ok ismaster response", + "description": "Handle a not-ok isWritablePrimary response", "uri": "mongodb://a", "phases": [ { @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -17,7 +18,8 @@ "a:27017", { "ok": 0, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json index 4c363ffffb..57f8f861b1 100644 --- a/test/discovery_and_monitoring/single/standalone_removed.json +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": 
true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/direct_connection_slave.json b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json similarity index 84% rename from test/discovery_and_monitoring/single/direct_connection_slave.json rename to test/discovery_and_monitoring/single/standalone_using_legacy_hello.json index a40debd183..46660fa8de 100644 --- a/test/discovery_and_monitoring/single/direct_connection_slave.json +++ b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json @@ -1,5 +1,5 @@ { - "description": "Direct connection to slave", + "description": "Connect to standalone using legacy hello", "uri": "mongodb://a", "phases": [ { @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "ismaster": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/too_new.json b/test/discovery_and_monitoring/single/too_new.json index 38e4621d60..8dd57d3348 100644 --- a/test/discovery_and_monitoring/single/too_new.json +++ b/test/discovery_and_monitoring/single/too_new.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 999, "maxWireVersion": 1000 } diff --git a/test/discovery_and_monitoring/single/too_old.json b/test/discovery_and_monitoring/single/too_old.json index fbf68262c0..8c027e01db 100644 --- a/test/discovery_and_monitoring/single/too_old.json +++ b/test/discovery_and_monitoring/single/too_old.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true + "helloOk": true, + "isWritablePrimary": true } ] ], diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json new file mode 100644 index 0000000000..58ae7d9de4 --- /dev/null +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -0,0 +1,56 @@ +{ + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-error.json b/test/discovery_and_monitoring/unified/auth-error.json new file mode 100644 index 0000000000..62d26494c7 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + 
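
The single-topology too_new/too_old fixtures, plus the new too_old_then_upgraded one, assert the range-overlap rule for wire versions: a server that omits `maxWireVersion` defaults to 0 and is reported as incompatible until it advertises a modern range. A sketch, with an illustrative client range (not PyMongo's actual bounds):

```python
CLIENT_MIN_WIRE_VERSION = 6   # illustrative values only
CLIENT_MAX_WIRE_VERSION = 21

def compatible(server_min=0, server_max=0):
    # Incompatible exactly when the two ranges fail to overlap.
    return (server_min <= CLIENT_MAX_WIRE_VERSION
            and server_max >= CLIENT_MIN_WIRE_VERSION)

assert not compatible(999, 1000)   # too_new
assert not compatible()            # defaults of 0, before the upgrade
assert compatible(0, 6)            # after the standalone upgrades
```
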
"_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-misc-command-error.json b/test/discovery_and_monitoring/unified/auth-misc-command-error.json new file mode 100644 index 0000000000..fd62fe604e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-misc-command-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-misc-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-misc-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json new file mode 100644 index 0000000000..84763af32e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json new file mode 100644 index 0000000000..3cf9576eba --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -0,0 +1,233 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-shutdown-error.json b/test/discovery_and_monitoring/unified/auth-shutdown-error.json new file mode 100644 index 0000000000..b9e503af66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-shutdown-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-shutdown-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/cancel-server-check.json b/test/discovery_and_monitoring/unified/cancel-server-check.json new file mode 100644 index 0000000000..a60ccfcb41 --- /dev/null +++ b/test/discovery_and_monitoring/unified/cancel-server-check.json @@ -0,0 +1,201 @@ +{ + "description": "cancel-server-check", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Cancel server check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + 
"serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "cancel-server-check" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectResult": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/connectTimeoutMS.json b/test/discovery_and_monitoring/unified/connectTimeoutMS.json new file mode 100644 index 0000000000..d3e860a9cb --- /dev/null +++ b/test/discovery_and_monitoring/unified/connectTimeoutMS.json @@ -0,0 +1,221 @@ +{ + "description": "connectTimeoutMS", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "connectTimeoutMS=0", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "connectTimeoutMS" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } 
+ ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + }, + "client": "setupClient" + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-error.json b/test/discovery_and_monitoring/unified/find-network-error.json new file mode 100644 index 0000000000..c1b6db40ca --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-error.json @@ -0,0 +1,234 @@ +{ + "description": "find-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": 
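
The connectTimeoutMS test blocks two hello responses for 550 ms and then asserts zero Unknown transitions and zero pool clears: with `connectTimeoutMS=0` the monitor's socket has no deadline, so the slow handshake is simply waited out rather than treated as a failure. The options involved are ordinary PyMongo/URI options (host assumed local):

```python
from pymongo import MongoClient

client = MongoClient(
    "mongodb://localhost:27017",
    connectTimeoutMS=0,            # 0 disables the connect timeout
    heartbeatFrequencyMS=500,      # check servers every 500 ms
    retryWrites=False,
    appname="connectTimeoutMS=0",  # matches the fail point's appName filter
)
```
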
"collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-timeout-error.json b/test/discovery_and_monitoring/unified/find-network-timeout-error.json new file mode 100644 index 0000000000..e5ac9f21aa --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-timeout-error.json @@ -0,0 +1,199 @@ +{ + "description": "find-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "find-network-timeout-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-timeout-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-shutdown-error.json b/test/discovery_and_monitoring/unified/find-shutdown-error.json new file mode 100644 index 0000000000..6e5a2cac05 --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-shutdown-error.json @@ -0,0 +1,251 @@ +{ + "description": "find-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { 
+ "thread": "thread0", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json new file mode 100644 index 0000000000..9afea87e77 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -0,0 +1,376 @@ +{ + "description": "hello-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Command error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + 
} + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Command error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750, + "errorCode": 91 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json new file mode 100644 index 0000000000..55373c90cc --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -0,0 +1,346 @@ +{ + "description": "hello-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-timeout.json b/test/discovery_and_monitoring/unified/hello-timeout.json new file mode 100644 index 0000000000..fe7cf4e78d --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-timeout.json @@ -0,0 +1,514 @@ +{ + "description": "hello-timeout", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network timeout on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "Driver extends timeout while streaming", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + 
"observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-network-error.json b/test/discovery_and_monitoring/unified/insert-network-error.json new file mode 100644 index 0000000000..bfe41a4cb6 --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-network-error.json @@ -0,0 +1,246 @@ +{ + "description": "insert-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "useMultipleMongoses": false + } + }, + { + "database": { 
+ "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-shutdown-error.json b/test/discovery_and_monitoring/unified/insert-shutdown-error.json new file mode 100644 index 0000000000..af7c6c987a --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-shutdown-error.json @@ -0,0 +1,250 @@ +{ + "description": "insert-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": 
"collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json new file mode 100644 index 0000000000..7e294baf66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ -0,0 +1,177 @@ +{ + "description": "minPoolSize-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "sdam-minPoolSize-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } 
+ } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "poolReadyEvent" + ], + "uriOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "sdam-minPoolSize-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": {} + }, + "commandName": "ping" + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + }, + "client": "setupClient" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 2 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/pool-cleared-error.json b/test/discovery_and_monitoring/unified/pool-cleared-error.json new file mode 100644 index 0000000000..b7f6924f2b --- /dev/null +++ b/test/discovery_and_monitoring/unified/pool-cleared-error.json @@ -0,0 +1,373 @@ +{ + "description": "pool-cleared-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "PoolClearedError does not mark server unknown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "pool-cleared-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 
1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100, + "closeConnection": true, + "appName": "poolClearedErrorTest" + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + }, + { + "thread": { + "id": "thread2" + } + }, + { + "thread": { + "id": "thread3" + } + }, + { + "thread": { + "id": "thread4" + } + }, + { + "thread": { + "id": "thread5" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread3", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 5 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread4", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread5", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 7 + } + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread2" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread3" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread4" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread5" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 8 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + 
"documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..3147a07a1e --- /dev/null +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -0,0 +1,242 @@ +{ + "description": "rediscover-quickly-after-step-down", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "setupClient", + "databaseName": "admin" + } + } + ], + "initialData": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test-replSetStepDown" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordTopologyDescription", + "object": "testRunner", + "arguments": { + "client": "client", + "id": "topologyDescription" + } + }, + { + "name": "assertTopologyType", + "object": "testRunner", + "arguments": { + "topologyDescription": "topologyDescription", + "topologyType": "ReplicaSetWithPrimary" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "replSetFreeze" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, + "force": false + }, + "commandName": "replSetStepDown" + } + }, + { + "name": "waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "client": "client", + "priorTopologyDescription": "topologyDescription", + "timeoutMS": 15000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + 
"documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json new file mode 100644 index 0000000000..7d681b4f9e --- /dev/null +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json @@ -0,0 +1,449 @@ +{ + "description": "serverMonitoringMode", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded", + "sharded-replicaset" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "connect with serverMonitoringMode=auto >=4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=auto <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with 
serverMonitoringMode=stream >=4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=stream <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=poll", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + 
"ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index fb5de861f1..7a4ec27f88 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -1,304 +1,799 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-delete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + 
"aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000004" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Delete when length is 0", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "delete when length is 0", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000001" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + 
"files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 0 and there is one extra empty chunk", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "delete when length is 0 and there is one extra empty chunk", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 8", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "delete when length is 8", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } 
- }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000000" + "description": "delete when files entry does not exist", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound" - } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + 
"collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist and there are orphaned chunks", - "arrange": { - "data": [ - { - "delete": "fs.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + "description": "delete when files entry does not exist and there are orphaned chunks", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_files_collection", + "arguments": { + "filter": { + "_id": { + "$oid": "000000000000000000000004" } - ] + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + }, + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + 
"$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/gridfs/download.json b/test/gridfs/download.json index 5092fba981..48d3246218 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -1,467 +1,558 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 10, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000006" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-download", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000003" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000002" + }, + 
"length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" - } - }, - { - "_id": { - "$oid": "000000000000000000000007" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 2, - "data": { - "$hex": "99aa" - } - }, - { - "_id": { - "$oid": "000000000000000000000008" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - 
"files_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000008" + }, + "files_id": { + "$oid": "000000000000000000000006" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Download when length is zero", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "download when length is zero", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when length is zero and there is one empty chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "download when length is zero and there is one empty chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when there is one chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000003" + "description": "download when there is one chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] }, { - "description": "Download when there are two chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "download when there are two chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122334455667788" + "expectResult": { + "$$matchesHexBytes": "1122334455667788" + } } - } + ] }, { - "description": "Download when there are three chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + "description": "download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "112233445566778899aa" + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } } - } + ] }, { - "description": "Download when files entry does not exist", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000000" - }, - "options": {} + "description": "download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } } - }, - "assert": { - "error": "FileNotFound" - } + ] }, { - "description": 
"Download when an intermediate chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when final chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when final chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when an intermediate chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ + "description": "download when an intermediate chunk is the wrong size", + "operations": [ + { + "name": "bulkWrite", + "object": "bucket0_chunks_collection", + "arguments": { + "requests": [ { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 }, - "n": 1 - }, - "u": { - "$set": { - "data": { - "$hex": "556677" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "VWZ3", + "subType": "00" + } + } } } } }, { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "8899aa" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "iJmq", + "subType": "00" + } + } } } } } ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download when final chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "99" - } + "description": "download when final chunk is the wrong size", + "operations": [ + { + "name": "updateOne", + "object": 
"bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "mQ==", + "subType": "00" } } } - ] + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download legacy file with no name", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000006" + "description": "download legacy file with no name", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000006" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] } ] } diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json new file mode 100644 index 0000000000..cd44663957 --- /dev/null +++ b/test/gridfs/downloadByName.json @@ -0,0 +1,330 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "47ed733b8d10be225eceba344d533586", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-02T00:00:00.000Z" + }, + "md5": "b15835f133ff2e27c7cb28117bfae8f4", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-03T00:00:00.000Z" + }, + "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-04T00:00:00.000Z" + }, + "md5": "f623e75af30e62bbd73d6df5b50bb7b5", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-05T00:00:00.000Z" + }, + "md5": "4c614360da93c0a041b22e537de151eb", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": 
"fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Ig==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Mw==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "RA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "downloadByName defaults to latest revision (-1)", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when revision is 0", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 0 + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ] + }, + { + "description": "downloadByName when revision is 1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 1 + }, + "expectResult": { + "$$matchesHexBytes": "22" + } + } + ] + }, + { + "description": "downloadByName when revision is 2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 2 + }, + "expectResult": { + "$$matchesHexBytes": "33" + } + } + ] + }, + { + "description": "downloadByName when revision is -2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -2 + }, + "expectResult": { + "$$matchesHexBytes": "44" + } + } + ] + }, + { + "description": "downloadByName when revision is -1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -1 + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when files entry does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "xyz" + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "downloadByName when revision does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 999 + }, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/download_by_name.json b/test/gridfs/download_by_name.json deleted file mode 100644 index ecc8c9e2cc..0000000000 --- a/test/gridfs/download_by_name.json +++ /dev/null @@ -1,240 +0,0 @@ -{ - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": 
"47ed733b8d10be225eceba344d533586", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-02T00:00:00.000Z" - }, - "md5": "b15835f133ff2e27c7cb28117bfae8f4", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-03T00:00:00.000Z" - }, - "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-04T00:00:00.000Z" - }, - "md5": "f623e75af30e62bbd73d6df5b50bb7b5", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-05T00:00:00.000Z" - }, - "md5": "4c614360da93c0a041b22e537de151eb", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$hex": "11" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "22" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000003" - }, - "n": 0, - "data": { - "$hex": "33" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "files_id": { - "$oid": "000000000000000000000004" - }, - "n": 0, - "data": { - "$hex": "44" - } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 0, - "data": { - "$hex": "55" - } - } - ] - }, - "tests": [ - { - "description": "Download_by_name when revision is 0", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 0 - } - } - }, - "assert": { - "result": { - "$hex": "11" - } - } - }, - { - "description": "Download_by_name when revision is 1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 1 - } - } - }, - "assert": { - "result": { - "$hex": "22" - } - } - }, - { - "description": "Download_by_name when revision is -2", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -2 - } - } - }, - "assert": { - "result": { - "$hex": "44" - } - } - }, - { - "description": "Download_by_name when revision is -1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -1 - } - } - }, - "assert": { - "result": { - "$hex": "55" - } - } - }, - { - "description": "Download_by_name when files entry does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "xyz" - } - }, - "assert": { - "error": "FileNotFound" - } - }, - { - "description": "Download_by_name when revision does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - 
"revision": 999 - } - } - }, - "assert": { - "error": "RevisionNotFound" - } - } - ] -} diff --git a/test/gridfs/upload-disableMD5.json b/test/gridfs/upload-disableMD5.json new file mode 100644 index 0000000000..d5a9d6f4ab --- /dev/null +++ b/test/gridfs/upload-disableMD5.json @@ -0,0 +1,172 @@ +{ + "description": "gridfs-upload-disableMD5", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "upload when length is 0 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ] + }, + { + "description": "upload when length is 1 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 324ac49d23..97e18d2bc2 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -1,387 +1,616 @@ { - "data": { - "files": [], - "chunks": [] - }, + "description": "gridfs-upload", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": 
"fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], "tests": [ { - "description": "Upload when length is 0", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "" - }, - "options": { + "description": "upload when length is 0", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, "chunkSizeBytes": 4 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "d41d8cd98f00b204e9800998ecf8427e" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 0, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "filename" - } - ] - } - ] - } + ] }, { - "description": "Upload when length is 1", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when length is 1", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 3", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "112233" - }, - "options": { + "description": "upload when length is 3", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + 
"filename": "filename", + "source": { + "$$hexBytes": "112233" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 3, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "bafae3a174ab91fc70db7a6aa50f4f52", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "112233" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 3, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "bafae3a174ab91fc70db7a6aa50f4f52" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIz", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 4", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11223344" - }, - "options": { + "description": "upload when length is 4", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11223344" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 4, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "7e7c77cff5705d1f7574a25ef6662117", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 4, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "7e7c77cff5705d1f7574a25ef6662117" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 5", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455" - }, - "options": { + "description": "upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 5, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": 
"283d4fea5dded59cf837d3047328f5af", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55" + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 8", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455667788" - }, - "options": { + "description": "upload when length is 8", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455667788" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 8, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "dd254cdc958e53abaa67da9f797125f5" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55667788" + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when contentType is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when contentType is provided", + "operations": [ + { + 
"name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "contentType": "image/jpeg" - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", - "filename": "filename", - "contentType": "image/jpeg" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "contentType": "image/jpeg" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when metadata is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when metadata is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "metadata": { "x": 1 } - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", - "filename": "filename", - "metadata": { - "x": 1 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "metadata": { + "x": 1 } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/index_management/createSearchIndex.json b/test/index_management/createSearchIndex.json new file mode 100644 index 0000000000..04cffbe9c9 --- /dev/null +++ b/test/index_management/createSearchIndex.json @@ -0,0 +1,136 @@ +{ + "description": "createSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": 
false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/createSearchIndexes.json b/test/index_management/createSearchIndexes.json new file mode 100644 index 0000000000..95dbedde77 --- /dev/null +++ b/test/index_management/createSearchIndexes.json @@ -0,0 +1,172 @@ +{ + "description": "createSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "empty index definition array", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with 
Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/dropSearchIndex.json b/test/index_management/dropSearchIndex.json new file mode 100644 index 0000000000..0f21a5b68d --- /dev/null +++ b/test/index_management/dropSearchIndex.json @@ -0,0 +1,74 @@ +{ + "description": "dropSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/listSearchIndexes.json b/test/index_management/listSearchIndexes.json new file mode 100644 index 0000000000..24c51ad88c --- /dev/null +++ b/test/index_management/listSearchIndexes.json @@ -0,0 +1,156 @@ +{ + "description": "listSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "when no name is provided, it does not populate the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ] + } + } + } + ] + } + ] + }, + { + "description": "when a name is provided, it is present in the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "aggregation cursor options are supported", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index", + "aggregationOptions": { + "batchSize": 10 + } + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 10 + }, + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/updateSearchIndex.json b/test/index_management/updateSearchIndex.json new file mode 100644 index 0000000000..88a46a3069 --- /dev/null +++ b/test/index_management/updateSearchIndex.json @@ -0,0 +1,76 @@ +{ + "description": "updateSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Search index commands are only supported with Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/lambda/README.md b/test/lambda/README.md new file mode 100644 index 0000000000..2727a2cee9 --- /dev/null +++ b/test/lambda/README.md @@ -0,0 +1,17 @@ +AWS Lambda Testing +------------------ + +Running locally +=============== + +Prerequisites: + +- AWS SAM CLI +- Docker daemon running + +Usage +===== + +- Start a local mongodb instance on port 27017 +- Run ``build.sh`` +- Run ``test.sh`` diff --git a/test/lambda/build.sh b/test/lambda/build.sh new file mode 100755 index 0000000000..c7cc24eab2 --- /dev/null +++ b/test/lambda/build.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail +set -o 
+
+rm -rf mongodb/pymongo
+rm -rf mongodb/gridfs
+rm -rf mongodb/bson
+
+pushd ../..
+rm -f pymongo/*.so
+rm -f bson/*.so
+image="quay.io/pypa/manylinux2014_x86_64:latest"
+
+DOCKER=$(command -v docker) || true
+if [ -z "$DOCKER" ]; then
+    PODMAN=$(command -v podman) || true
+    if [ -z "$PODMAN" ]; then
+        echo "docker or podman is required!"
+        exit 1
+    fi
+    DOCKER=podman
+fi
+
+$DOCKER run --rm -v "`pwd`:/src" $image /src/test/lambda/build_internal.sh
+cp -r pymongo ./test/lambda/mongodb/pymongo
+cp -r bson ./test/lambda/mongodb/bson
+cp -r gridfs ./test/lambda/mongodb/gridfs
+popd
diff --git a/test/lambda/build_internal.sh b/test/lambda/build_internal.sh
new file mode 100755
index 0000000000..fec488d32c
--- /dev/null
+++ b/test/lambda/build_internal.sh
@@ -0,0 +1,5 @@
+#!/bin/bash -ex
+
+cd /src
+PYTHON=/opt/python/cp39-cp39/bin/python
+$PYTHON -m pip install -v -e .
diff --git a/test/lambda/events/event.json b/test/lambda/events/event.json
new file mode 100644
index 0000000000..a6197dea6c
--- /dev/null
+++ b/test/lambda/events/event.json
@@ -0,0 +1,62 @@
+{
+  "body": "{\"message\": \"hello world\"}",
+  "resource": "/hello",
+  "path": "/hello",
+  "httpMethod": "GET",
+  "isBase64Encoded": false,
+  "queryStringParameters": {
+    "foo": "bar"
+  },
+  "pathParameters": {
+    "proxy": "/path/to/resource"
+  },
+  "stageVariables": {
+    "baz": "qux"
+  },
+  "headers": {
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+    "Accept-Encoding": "gzip, deflate, sdch",
+    "Accept-Language": "en-US,en;q=0.8",
+    "Cache-Control": "max-age=0",
+    "CloudFront-Forwarded-Proto": "https",
+    "CloudFront-Is-Desktop-Viewer": "true",
+    "CloudFront-Is-Mobile-Viewer": "false",
+    "CloudFront-Is-SmartTV-Viewer": "false",
+    "CloudFront-Is-Tablet-Viewer": "false",
+    "CloudFront-Viewer-Country": "US",
+    "Host": "1234567890.execute-api.us-east-1.amazonaws.com",
+    "Upgrade-Insecure-Requests": "1",
+    "User-Agent": "Custom User Agent String",
+    "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
+    "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
+    "X-Forwarded-For": "127.0.0.1, 127.0.0.2",
+    "X-Forwarded-Port": "443",
+    "X-Forwarded-Proto": "https"
+  },
+  "requestContext": {
+    "accountId": "123456789012",
+    "resourceId": "123456",
+    "stage": "prod",
+    "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
+    "requestTime": "09/Apr/2015:12:34:56 +0000",
+    "requestTimeEpoch": 1428582896000,
+    "identity": {
+      "cognitoIdentityPoolId": null,
+      "accountId": null,
+      "cognitoIdentityId": null,
+      "caller": null,
+      "accessKey": null,
+      "sourceIp": "127.0.0.1",
+      "cognitoAuthenticationType": null,
+      "cognitoAuthenticationProvider": null,
+      "userArn": null,
+      "userAgent": "Custom User Agent String",
+      "user": null
+    },
+    "path": "/prod/hello",
+    "resourcePath": "/hello",
+    "httpMethod": "POST",
+    "apiId": "1234567890",
+    "protocol": "HTTP/1.1"
+  }
+}
diff --git a/test/lambda/mongodb/Makefile b/test/lambda/mongodb/Makefile
new file mode 100644
index 0000000000..3632dfb161
--- /dev/null
+++ b/test/lambda/mongodb/Makefile
@@ -0,0 +1,4 @@
+
+build-MongoDBFunction:
+	cp -r . $(ARTIFACTS_DIR)
$(ARTIFACTS_DIR) + python -m pip install -t $(ARTIFACTS_DIR) dnspython diff --git a/test/lambda/mongodb/__init__.py b/test/lambda/mongodb/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py new file mode 100644 index 0000000000..65e6dc88ff --- /dev/null +++ b/test/lambda/mongodb/app.py @@ -0,0 +1,159 @@ +""" +Lambda function for Python Driver testing + +Creates the client that is cached for all requests, subscribes to +relevant events, and forces the connection pool to get populated. +""" +from __future__ import annotations + +import json +import os + +from bson import has_c as has_bson_c +from pymongo import MongoClient +from pymongo import has_c as has_pymongo_c +from pymongo.monitoring import ( + CommandListener, + ConnectionPoolListener, + ServerHeartbeatListener, +) + +open_connections = 0 +heartbeat_count = 0 +streaming_heartbeat_count = 0 +total_heartbeat_duration = 0 +total_commands = 0 +total_command_duration = 0 + +# Ensure we are using C extensions +assert has_bson_c() +assert has_pymongo_c() + + +class CommandHandler(CommandListener): + def started(self, event): + print("command started", event) + + def succeeded(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command succeeded", event) + + def failed(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command failed", event) + + +class ServerHeartbeatHandler(ServerHeartbeatListener): + def started(self, event): + print("server heartbeat started", event) + + def succeeded(self, event): + global heartbeat_count, total_heartbeat_duration, streaming_heartbeat_count + heartbeat_count += 1 + total_heartbeat_duration += event.duration + if event.awaited: + streaming_heartbeat_count += 1 + print("server heartbeat succeeded", event) + + def failed(self, event): + global heartbeat_count, total_heartbeat_duration + heartbeat_count += 1 + total_heartbeat_duration += event.duration + print("server heartbeat failed", event) + + +class ConnectionHandler(ConnectionPoolListener): + def connection_created(self, event): + global open_connections + open_connections += 1 + print("connection created") + + def connection_ready(self, event): + pass + + def connection_closed(self, event): + global open_connections + open_connections -= 1 + print("connection closed") + + def connection_check_out_started(self, event): + pass + + def connection_check_out_failed(self, event): + pass + + def connection_checked_out(self, event): + pass + + def connection_checked_in(self, event): + pass + + def pool_created(self, event): + pass + + def pool_ready(self, event): + pass + + def pool_cleared(self, event): + pass + + def pool_closed(self, event): + pass + + +listeners = [CommandHandler(), ServerHeartbeatHandler(), ConnectionHandler()] +print("Creating client") +client = MongoClient(os.environ["MONGODB_URI"], event_listeners=listeners) + + +# Populate the connection pool. +print("Connecting") +client.lambdaTest.list_collections() +print("Connected") + + +# Create the response to send back. +def create_response(): + return dict( + averageCommandDuration=total_command_duration / total_commands, + averageHeartbeatDuration=total_heartbeat_duration / heartbeat_count + if heartbeat_count + else 0, + openConnections=open_connections, + heartbeatCount=heartbeat_count, + ) + + +# Reset the numbers. 
+def reset(): + global open_connections, heartbeat_count, total_heartbeat_duration, total_commands, total_command_duration + open_connections = 0 + heartbeat_count = 0 + total_heartbeat_duration = 0 + total_commands = 0 + total_command_duration = 0 + + +def lambda_handler(event, context): + """ + The handler function itself performs an insert/delete and returns the + id of the document in play. + """ + print("initializing") + db = client.lambdaTest + collection = db.test + result = collection.insert_one({"n": 1}) + collection.delete_one({"_id": result.inserted_id}) + # Create the response and then reset the numbers. + response = json.dumps(create_response()) + reset() + print("finished!") + assert ( + streaming_heartbeat_count == 0 + ), f"streaming_heartbeat_count was {streaming_heartbeat_count} not 0" + + return dict(statusCode=200, body=response) diff --git a/test/lambda/run.sh b/test/lambda/run.sh new file mode 100755 index 0000000000..5f1980a5f9 --- /dev/null +++ b/test/lambda/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +sam build +sam local invoke --docker-network host --parameter-overrides "MongoDbUri=mongodb://host.docker.internal:27017" diff --git a/test/lambda/template.yaml b/test/lambda/template.yaml new file mode 100644 index 0000000000..651ac4a8f8 --- /dev/null +++ b/test/lambda/template.yaml @@ -0,0 +1,49 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: > + Python driver lambda function test + +# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst +Globals: + Function: + Timeout: 30 + MemorySize: 128 + +Parameters: + MongoDbUri: + Type: String + Description: The MongoDB connection string. + +Resources: + MongoDBFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: mongodb/ + Environment: + Variables: + MONGODB_URI: !Ref MongoDbUri + Handler: app.lambda_handler + Runtime: python3.9 + Architectures: + - x86_64 + Events: + MongoDB: + Type: Api + Properties: + Path: /mongodb + Method: get + # Use a custom build method to make sure *.so files are copied. 
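For quick local iteration, the handler defined in test/lambda/mongodb/app.py above can also be driven directly from a Python process rather than through SAM. A minimal sketch, assuming a MongoDB instance on localhost:27017, that pymongo is installed with its C extensions (app.py asserts them), and that it is run from test/lambda so the mongodb package resolves; MONGODB_URI must be set before the import because app.py constructs the cached MongoClient at import time:

    import json
    import os

    # app.py reads MONGODB_URI when it is imported, so set it first.
    os.environ.setdefault("MONGODB_URI", "mongodb://localhost:27017")

    from mongodb import app  # run from test/lambda so the package is importable

    # lambda_handler ignores its event/context arguments, so stand-ins suffice.
    result = app.lambda_handler({}, None)
    assert result["statusCode"] == 200
    print(json.loads(result["body"]))

One caveat: the handler asserts that no awaited (streaming) heartbeats were observed. Inside a real Lambda runtime PyMongo is expected to use polling monitoring, but a plain local process may negotiate the streaming protocol against MongoDB 4.4+, so that assertion can trip if the process stays up long enough; run.sh sidesteps this by invoking the function through sam local invoke inside the Lambda runtime image.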
+ # https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/building-custom-runtimes.html + Metadata: + BuildMethod: makefile + +Outputs: + MongoDBApi: + Description: "API Gateway endpoint URL for Prod stage for Python driver lambda function" + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/" + MongoDBFunction: + Description: "Python driver lambda Function ARN" + Value: !GetAtt MongoDBFunction.Arn + MongoDBFunctionIamRole: + Description: "Implicit IAM Role created for Python driver lambda function" + Value: !GetAtt MongoDBFunctionRole.Arn diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json new file mode 100644 index 0000000000..e66c46c0c3 --- /dev/null +++ b/test/load_balancer/cursors.json @@ -0,0 +1,1238 @@ +{ + "description": "cursors are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database0", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "database0Name", + "documents": [] + }, + { + "collectionName": "coll2", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "no connection is pinned if all documents are returned in the initial batch", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {} + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned when the cursor is drained", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned to the pool when the cursor is closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } + ] + }, + { + "description": "pinned connections are returned after a network error during a killCursors request", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandFailedEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } + ] + }, + { + "description": "pinned connections are returned to the pool after a non-network error on 
getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "errorCode": 7 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "aggregate pins the cursor to a connection", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "listCollections pins the cursor to a connection", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "listCollections", + 
"object": "database0", + "arguments": { + "filter": {}, + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listCollections", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": { + "$$type": "string" + } + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "listIndexes pins the cursor to a connection", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "y": 1 + }, + "name": "y_1" + } + }, + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "y_1", + "key": { + "y": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listIndexes", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "change streams pin to a connection", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": 
"changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/event-monitoring.json b/test/load_balancer/event-monitoring.json new file mode 100644 index 0000000000..938c70bf38 --- /dev/null +++ b/test/load_balancer/event-monitoring.json @@ -0,0 +1,184 @@ +{ + "description": "monitoring events include correct fields", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "command started and succeeded events include serviceId", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServiceId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "command failed events include serviceId", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "poolClearedEvent events include serviceId", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + 
"commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/lb-connection-establishment.json b/test/load_balancer/lb-connection-establishment.json new file mode 100644 index 0000000000..0eaadf30c2 --- /dev/null +++ b/test/load_balancer/lb-connection-establishment.json @@ -0,0 +1,58 @@ +{ + "description": "connection establishment for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "loadBalanced": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + "tests": [ + { + "description": "operations against load balancers fail if URI contains loadBalanced=false", + "skipReason": "servers have not implemented LB support yet so they will not fail the connection handshake in this case", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/load_balancer/non-lb-connection-establishment.json b/test/load_balancer/non-lb-connection-establishment.json new file mode 100644 index 0000000000..6aaa7bdf98 --- /dev/null +++ b/test/load_balancer/non-lb-connection-establishment.json @@ -0,0 +1,92 @@ +{ + "description": "connection establishment if loadBalanced is specified for non-load balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "lbTrueClient", + "useMultipleMongoses": false, + "uriOptions": { + "loadBalanced": true + } + } + }, + { + "database": { + "id": "lbTrueDatabase", + "client": "lbTrueClient", + "databaseName": "lbTrueDb" + } + }, + { + "client": { + "id": "lbFalseClient", + "uriOptions": { + "loadBalanced": false + } + } + }, + { + "database": { + "id": "lbFalseDatabase", + "client": "lbFalseClient", + "databaseName": "lbFalseDb" + } + } + ], + "_yamlAnchors": { + "runCommandArguments": [ + { + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + }, + "tests": [ + { + "description": "operations against non-load balanced clusters fail if URI contains loadBalanced=true", + "operations": [ + { + "name": "runCommand", + "object": "lbTrueDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "errorContains": "Driver attempted to initialize in load balancing mode, but the server does not support this mode" + } + } + ] + }, + { + "description": "operations against non-load balanced clusters succeed if URI contains loadBalanced=false", + "operations": [ + { + "name": "runCommand", + "object": "lbFalseDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + } + ] +} diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json new file mode 100644 index 0000000000..8760b723fd --- /dev/null +++ b/test/load_balancer/sdam-error-handling.json @@ -0,0 +1,515 @@ +{ + "description": "state 
change errors are correctly handled", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "observedEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + }, + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "singleClient", + "useMultipleMongoses": false, + "uriOptions": { + "appname": "lbSDAMErrorTestClient", + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "singleDB", + "client": "singleClient", + "databaseName": "singleDB" + } + }, + { + "collection": { + "id": "singleColl", + "database": "singleDB", + "collectionName": "singleColl" + } + }, + { + "client": { + "id": "multiClient", + "useMultipleMongoses": true, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "multiDB", + "client": "multiClient", + "databaseName": "multiDB" + } + }, + { + "collection": { + "id": "multiColl", + "database": "multiDB", + "collectionName": "multiColl" + } + } + ], + "initialData": [ + { + "collectionName": "singleColl", + "databaseName": "singleDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "multiColl", + "databaseName": "multiDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "only connections for a specific serviceId are closed when pools are cleared", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "close", + "object": "cursor1" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "multiClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 11600 + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "multiClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + 
"connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "stale" + } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "errors during the initial connection hello are ignored", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ismaster", + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + } + ] + } + ] + }, + { + "description": "errors during authentication are processed", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + }, + { + "poolClearedEvent": {} + } + ] + } + ] + }, + { + "description": "stale errors are ignored", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor1" + } + ], + 
"expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/server-selection.json b/test/load_balancer/server-selection.json new file mode 100644 index 0000000000..00c7e4c95b --- /dev/null +++ b/test/load_balancer/server-selection.json @@ -0,0 +1,82 @@ +{ + "description": "server selection for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "$readPreference is sent for load-balanced clusters", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "$readPreference": { + "mode": "secondaryPreferred" + } + }, + "commandName": "find", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json new file mode 100644 index 0000000000..8cf24f4ca4 --- /dev/null +++ b/test/load_balancer/transactions.json @@ -0,0 +1,1621 @@ +{ + "description": "transactions are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "_yamlAnchors": { + "documents": [ + { + "_id": 4 + } + ] + }, + "tests": [ + { + "description": "sessions are reused in LB mode", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": 
"assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "all operations go to the same mongos", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "transaction can be committed multiple times", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": 
"client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient CRUD error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a non-transient abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + 
"connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network commit error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "ignoreResultAndError": true + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network abort error", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + 
"connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a non-transaction operation uses the session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "a connection can be shared by a transaction and a cursor", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + 
"connections": 1 + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2, + "session": "session0" + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/wait-queue-timeouts.json b/test/load_balancer/wait-queue-timeouts.json new file mode 100644 index 0000000000..3dc6e46cff --- /dev/null +++ b/test/load_balancer/wait-queue-timeouts.json @@ -0,0 +1,153 @@ +{ + "description": "wait queue timeout errors include details about checked out connections", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 50 + }, + "observeEvents": [ + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "wait queue timeout errors include cursor statistics", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 1, connections in use by transactions: 0, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + }, + { + "description": "wait queue timeout errors include transaction statistics", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": 
"insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 0, connections in use by transactions: 1, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index bf15fe7345..5afebbbdcb 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -1,74 +1,74 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json b/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json deleted file mode 100644 index f0eceefc21..0000000000 --- a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - 
"address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } -} diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json index 24a5c21cab..492d8a2f62 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 25001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 25002, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 25002, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 25001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 6 } + ] } 
diff --git a/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json new file mode 100644 index 0000000000..28e5e2aa4a --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json @@ -0,0 +1,20 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json index 8f047ee901..6602561c1d 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - 
"maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 6 } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json index 18314cb6c7..16d9a673bd 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 6 } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json index 847f2874c1..5905fcbc60 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json +++ b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json @@ -1,20 +1,21 @@ { - "error": true, - 
"read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "type": "Unknown" - }, - { - "address": "b:27017", - "type": "Unknown" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json new file mode 100644 index 0000000000..54f318872f --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "PossiblePrimary", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "b:27017", + "type": "Unknown", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json index 72fff11454..7956b8e516 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -1,64 +1,64 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 90, - "mode": "PrimaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } 
- }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 0f6865624d..453dce6605 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -1,84 +1,84 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } } + ] } diff 
--git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json index 2ea70b1629..b383f275dc 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -1,111 +1,111 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + 
"lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json index db45042330..7bce7d0aa4 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -1,63 +1,63 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "SecondaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index ab4e5a3b23..32c9ca770b 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -1,111 +1,111 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": 
"nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json index 23da14cad2..fd84cd1193 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 0, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + 
"type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index d8418c139e..35eaa9d69d 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -1,74 +1,74 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json deleted file mode 100644 index ec65af9cf8..0000000000 --- a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - 
"topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } -} diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json index 89725d9000..18450beaed 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" 
+ }, + "maxWireVersion": 6 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json index 3ac92f341a..b9fb407f9e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -1,76 +1,76 @@ { - "heartbeatFrequencyMS": 120000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 130, - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 130 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json index 2ed9f75f9e..b695e1caeb 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -1,37 +1,37 @@ { - "error": true, - "heartbeatFrequencyMS": 120000, - "read_preference": { - "maxStalenessSeconds": 129, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } 
- }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 129 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 049cd7c4cd..9b798d37da 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -1,37 +1,37 @@ { - "error": true, - "heartbeatFrequencyMS": 500, - "read_preference": { - "maxStalenessSeconds": 89, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "heartbeatFrequencyMS": 500, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 89 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index 54155d40fa..1fa7bb4dd0 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -1,35 +1,35 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120 - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + 
"lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json index ea8ba031d6..198be4a681 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "maxWireVersion": 6 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json index 86bf3b988c..3ae629c898 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 
25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 6 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "maxWireVersion": 6 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json index e147a57f67..675df82631 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -1,84 +1,84 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + 
"lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json index b7dd197031..795b47a111 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -1,64 +1,64 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - 
"lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json deleted file mode 100644 index 77ba55276b..0000000000 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "error": true, - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } -} diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json index 01a9aea7e9..5455708a70 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -1,63 +1,63 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "SecondaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + 
} + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 5ca04f4355..6670b54c89 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -1,138 +1,138 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + 
"maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "e:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index f512ff2609..642fee1fb3 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -1,96 +1,96 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - 
"lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json index 7df8eac73c..502120dce6 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -1,138 +1,138 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } }, - { - "address": 
"c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "e:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json index d74234c6b1..6978a1807b 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -1,96 +1,96 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 6 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - 
"maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 6, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json index 126c19a19e..e1e4a7ffb7 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 0, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true } diff --git 
a/test/max_staleness/Sharded/Incompatible.json b/test/max_staleness/Sharded/Incompatible.json deleted file mode 100644 index 5e954166de..0000000000 --- a/test/max_staleness/Sharded/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "Mongos" - } - ], - "type": "Sharded" - } -} diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json index 2656f66c63..91d89720d1 100644 --- a/test/max_staleness/Sharded/SmallMaxStaleness.json +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -1,76 +1,76 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - } - ], - "type": "Sharded" + } } + ] } diff --git a/test/max_staleness/Single/Incompatible.json b/test/max_staleness/Single/Incompatible.json deleted file mode 100644 index 852202638f..0000000000 --- 
a/test/max_staleness/Single/Incompatible.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "Standalone" - } - ], - "type": "Single" - } -} diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json index 7c1792861c..b8d2db24be 100644 --- a/test/max_staleness/Single/SmallMaxStaleness.json +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -1,52 +1,52 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" - } - ], - "type": "Single" + } } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 6, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] } diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json index fc196abc92..8d69f46a1e 100644 --- a/test/max_staleness/Unknown/SmallMaxStaleness.json +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -1,18 +1,19 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "type": "Unknown" - } - ], - "type": "Unknown" - } + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "type": "Unknown", + "maxWireVersion": 6 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py new file mode 100644 index 0000000000..34302aa551 --- /dev/null +++ b/test/mockupdb/operations.py @@ -0,0 +1,120 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from collections import namedtuple + +from mockupdb import OpMsgReply, OpReply + +from pymongo import ReadPreference + +__all__ = ["operations", "upgrades"] + + +Operation = namedtuple("Operation", ["name", "function", "reply", "op_type", "not_master"]) +"""Client operations on MongoDB. + +Each has a human-readable name, a function that actually executes a test, and +a type that maps to one of the types in the Server Selection Spec: +'may-use-secondary', 'must-use-primary', etc. + +The special type 'always-use-secondary' applies to an operation with an explicit +read mode, like the operation "command('c', read_preference=SECONDARY)". + +The not-master response is how a secondary responds to a must-use-primary op, +or how a recovering member responds to a may-use-secondary op. + +Example uses: + +We can use "find_one" to validate that the SlaveOk bit is set when querying a +standalone, even with mode PRIMARY, but that it isn't set when sent to a mongos +with mode PRIMARY. Or it can validate that "$readPreference" is included in +mongos queries except with mode PRIMARY or SECONDARY_PREFERRED (PYTHON-865). + +We can use "options_old" and "options_new" to test that the driver queries an +old server's system.namespaces collection, but uses the listCollections command +on a new server (PYTHON-857). + +"secondary command" is good to test that the client can direct reads to +secondaries in a replica set, or select a mongos for secondary reads in a +sharded cluster (PYTHON-868). +""" + +not_master_reply = OpMsgReply(ok=0, errmsg="not master") + +operations = [ + Operation( + "find_one", + lambda client: client.db.collection.find_one(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "count_documents", + lambda client: client.db.collection.count_documents({}), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "aggregate", + lambda client: client.db.collection.aggregate([]), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "options", + lambda client: client.db.collection.options(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), + Operation( + "command", + lambda client: client.db.command("foo"), + reply={"ok": 1}, + op_type="must-use-primary", # Ignores client's read preference. 
+ not_master=not_master_reply, + ), + Operation( + "secondary command", + lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), + reply={"ok": 1}, + op_type="always-use-secondary", + not_master=OpReply(ok=0, errmsg="node is recovering"), + ), + Operation( + "listIndexes", + lambda client: client.db.collection.index_information(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), +] + + +_ops_by_name = {op.name: op for op in operations} + +Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) + +upgrades = [] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py new file mode 100644 index 0000000000..2051a24af6 --- /dev/null +++ b/test/mockupdb/test_auth_recovering_member.py @@ -0,0 +1,56 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB + +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + + +class TestAuthRecoveringMember(unittest.TestCase): + def test_auth_recovering_member(self): + # Test that we don't attempt auth against a recovering RS member. + server = MockupDB() + server.autoresponds( + "ismaster", + { + "minWireVersion": 2, + "maxWireVersion": 6, + "ismaster": False, + "secondary": False, + "setName": "rs", + }, + ) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient( + server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 + ) + + self.addCleanup(client.close) + + # Should see there's no primary or secondary and raise selection timeout + # error. If it raises AutoReconnect we know it actually tried the + # server, and that's wrong. + with self.assertRaises(ServerSelectionTimeoutError): + client.db.command("ping") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py new file mode 100644 index 0000000000..a64804541b --- /dev/null +++ b/test/mockupdb/test_cluster_time.py @@ -0,0 +1,161 @@ +# Copyright 2017-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test $clusterTime handling.""" +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, going + +from bson import Timestamp +from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne + + +class TestClusterTime(unittest.TestCase): + def cluster_time_conversation(self, callback, replies): + cluster_time = Timestamp(0, 0) + server = MockupDB() + + # First test all commands include $clusterTime with wire version 6. + _ = server.autoresponds( + "ismaster", + { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + }, + ) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} + request.reply(reply) + + def test_command(self): + def callback(client): + client.db.command("ping") + client.db.command("ping") + + self.cluster_time_conversation(callback, [{"ok": 1}] * 2) + + def test_bulk(self): + def callback(client: MongoClient[dict]) -> None: + client.db.collection.bulk_write( + [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] + ) + + self.cluster_time_conversation( + callback, + [{"ok": 1, "nInserted": 2}, {"ok": 1, "nModified": 1}, {"ok": 1, "nDeleted": 2}], + ) + + batches = [ + {"cursor": {"id": 123, "firstBatch": [{"a": 1}]}}, + {"cursor": {"id": 123, "nextBatch": [{"a": 2}]}}, + {"cursor": {"id": 0, "nextBatch": [{"a": 3}]}}, + ] + + def test_cursor(self): + def callback(client): + list(client.db.collection.find()) + + self.cluster_time_conversation(callback, self.batches) + + def test_aggregate(self): + def callback(client): + list(client.db.collection.aggregate([])) + + self.cluster_time_conversation(callback, self.batches) + + def test_explain(self): + def callback(client): + client.db.collection.find().explain() + + self.cluster_time_conversation(callback, [{"ok": 1}]) + + def test_monitor(self): + cluster_time = Timestamp(0, 0) + reply = { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + } + + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, heartbeatFrequencyMS=500) + self.addCleanup(client.close) + + request = server.receives("ismaster") + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn("$clusterTime", request) + request.ok(reply) + + # Next exchange: client returns first clusterTime, we send the second. + request = server.receives("ismaster") + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} + request.reply(reply) + + # Third exchange: client returns second clusterTime. + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + + # Return command error with a new clusterTime. 
+ cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + error = { + "ok": 0, + "code": 211, + "errmsg": "Cache Reader No keys found for HMAC ...", + "$clusterTime": {"clusterTime": cluster_time}, + } + request.reply(error) + + # PyMongo 3.11+ closes the monitoring connection on command errors. + + # Fourth exchange: the Monitor closes the connection and runs the + # handshake on a new connection. + request = server.receives("ismaster") + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn("$clusterTime", request) + + # Reply without $clusterTime. + reply.pop("$clusterTime") + request.reply(reply) + + # Fifth exchange: the Monitor attempt uses the clusterTime from + # the previous isMaster error. + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + request.reply(reply) + client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cursor.py b/test/mockupdb/test_cursor.py new file mode 100644 index 0000000000..96a7e17053 --- /dev/null +++ b/test/mockupdb/test_cursor.py @@ -0,0 +1,89 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor does not set exhaustAllowed automatically (PYTHON-4007).""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +from mockupdb import MockupDB, OpMsg, going + +from bson.objectid import ObjectId +from pymongo import MongoClient +from pymongo.errors import OperationFailure + + +class TestCursor(unittest.TestCase): + def test_getmore_load_balanced(self): + server = MockupDB() + server.autoresponds( + "hello", + isWritablePrimary=True, + msg="isdbgrid", + minWireVersion=0, + maxWireVersion=20, + helloOk=True, + serviceId=ObjectId(), + ) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, loadBalanced=True) + self.addCleanup(client.close) + collection = client.db.coll + cursor = collection.find() + with going(next, cursor): + request = server.receives(OpMsg({"find": "coll"})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + # Reply with the first batch. + request.reply({"cursor": {"id": 123, "firstBatch": [{}]}}) + + # 3 batches, check exhaustAllowed on all getMores.
+ for i in range(1, 3): + with going(next, cursor): + request = server.receives(OpMsg({"getMore": 123})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + cursor_id = 123 if i < 2 else 0 + request.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) + + +class TestRetryableErrorCodeCatch(PyMongoTestCase): + def _test_fail_on_operation_failure_with_code(self, code): + """Test reads on error codes that should not be retried""" + server = MockupDB() + server.run() + self.addCleanup(server.stop) + server.autoresponds("ismaster", maxWireVersion=6) + + client = MongoClient(server.uri) + + with going(lambda: server.receives(OpMsg({"find": "collection"})).command_err(code=code)): + cursor = client.db.collection.find() + with self.assertRaises(OperationFailure) as ctx: + cursor.next() + self.assertEqual(ctx.exception.code, code) + + def test_fail_on_operation_failure_none(self): + self._test_fail_on_operation_failure_with_code(None) + + def test_fail_on_operation_failure_zero(self): + self._test_fail_on_operation_failure_with_code(0) + + def test_fail_on_operation_failure_one(self): + self._test_fail_on_operation_failure_with_code(1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py new file mode 100644 index 0000000000..e6713abf10 --- /dev/null +++ b/test/mockupdb/test_cursor_namespace.py @@ -0,0 +1,141 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test list_indexes with more than one batch.""" +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, going + +from pymongo import MongoClient + + +class TestCursorNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_cursor_namespace(self, cursor_op, command): + with going(cursor_op) as docs: + request = self.server.receives(**{command: "collection", "namespace": "test"}) + # Respond with a different namespace. + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) + # Client uses the namespace we returned. 
+ request = self.server.receives( + getMore=123, namespace="different_db", collection="different.coll" + ) + + request.reply({"cursor": {"nextBatch": [{"doc": 2}], "id": 0}}) + + self.assertEqual([{"doc": 1}, {"doc": 2}], docs()) + + def test_aggregate_cursor(self): + def op(): + return list(self.client.test.collection.aggregate([])) + + self._test_cursor_namespace(op, "aggregate") + + def test_find_cursor(self): + def op(): + return list(self.client.test.collection.find()) + + self._test_cursor_namespace(op, "find") + + def test_list_indexes(self): + def op(): + return list(self.client.test.collection.list_indexes()) + + self._test_cursor_namespace(op, "listIndexes") + + +class TestKillCursorsNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_killCursors_namespace(self, cursor_op, command): + with going(cursor_op): + request = self.server.receives(**{command: "collection", "namespace": "test"}) + # Respond with a different namespace. + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) + # Client uses the namespace we returned for killCursors. + request = self.server.receives( + **{"killCursors": "different.coll", "cursors": [123], "$db": "different_db"} + ) + request.reply( + { + "ok": 1, + "cursorsKilled": [123], + "cursorsNotFound": [], + "cursorsAlive": [], + "cursorsUnknown": [], + } + ) + + def test_aggregate_killCursor(self): + def op(): + cursor = self.client.test.collection.aggregate([], batchSize=1) + next(cursor) + cursor.close() + + self._test_killCursors_namespace(op, "aggregate") + + def test_find_killCursor(self): + def op(): + cursor = self.client.test.collection.find(batch_size=1) + next(cursor) + cursor.close() + + self._test_killCursors_namespace(op, "find") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py new file mode 100644 index 0000000000..b06b48f01c --- /dev/null +++ b/test/mockupdb/test_getmore_sharded.py @@ -0,0 +1,61 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor with a sharded cluster.""" +from __future__ import annotations + +import unittest +from queue import Queue + +from mockupdb import MockupDB, going + +from pymongo import MongoClient + + +class TestGetmoreSharded(unittest.TestCase): + def test_getmore_sharded(self): + servers = [MockupDB(), MockupDB()] + + # Collect queries to either server in one queue. 
+ q: Queue = Queue() + for server in servers: + server.subscribe(q.put) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) + server.run() + self.addCleanup(server.stop) + + client = MongoClient( + "mongodb://%s:%d,%s:%d" + % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) + ) + self.addCleanup(client.close) + collection = client.db.collection + cursor = collection.find() + with going(next, cursor): + query = q.get(timeout=1) + query.replies({"cursor": {"id": 123, "firstBatch": [{}]}}) + + # 10 batches, all getMores go to same server. + for i in range(1, 10): + with going(next, cursor): + getmore = q.get(timeout=1) + self.assertEqual(query.server, getmore.server) + cursor_id = 123 if i < 9 else 0 + getmore.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py new file mode 100644 index 0000000000..00cae32ee5 --- /dev/null +++ b/test/mockupdb/test_handshake.py @@ -0,0 +1,268 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go + +from bson.objectid import ObjectId +from pymongo import MongoClient +from pymongo import version as pymongo_version +from pymongo.errors import OperationFailure +from pymongo.server_api import ServerApi, ServerApiVersion + + +def _check_handshake_data(request): + assert "client" in request + data = request["client"] + + assert data["application"] == {"name": "my app"} + assert data["driver"] == {"name": "PyMongo", "version": pymongo_version} + + # Keep it simple, just check these fields exist. + assert "os" in data + assert "platform" in data + + +class TestHandshake(unittest.TestCase): + def hello_with_option_helper(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req is None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". 
+ k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]), # type: ignore[arg-type] + ) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). + self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + + def test_client_handshake_data(self): + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply( + "ismaster", True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + error_response = OpReply(0, errmsg="Cache Reader No keys found for HMAC ...", code=211) + + secondary_response = OpReply( + "ismaster", + False, + setName="rs", + hosts=hosts, + secondary=True, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient( + primary.uri, replicaSet="rs", appname="my app", heartbeatFrequencyMS=500 + ) # Speed up the test. + + self.addCleanup(client.close) + + # New monitoring connections send data during handshake. + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Subsequent heartbeats have no client data. + primary.receives("ismaster", 1, client=absent).ok(error_response) + secondary.receives("ismaster", 1, client=absent).ok(error_response) + + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Still no client data. + primary.receives("ismaster", 1, client=absent).ok(primary_response) + secondary.receives("ismaster", 1, client=absent).ok(secondary_response) + + # After a disconnect, next ismaster has client data again. + primary.receives("ismaster", 1, client=absent).hangup() + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + secondary.autoresponds("ismaster", secondary_response) + + # Start a command, so the client opens an application socket. + future = go(client.db.command, "whatever") + + for request in primary: + if request.matches(Command("ismaster")): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket. + _check_handshake_data(request) + request.ok(primary_response) + else: + # Command succeeds. 
+ request.assert_matches(OpMsg("whatever"))
+ request.ok()
+ assert future()
+ return
+
+ def test_client_handshake_saslSupportedMechs(self):
+ server = MockupDB()
+ server.run()
+ self.addCleanup(server.stop)
+
+ primary_response = OpReply("ismaster", True, minWireVersion=2, maxWireVersion=6)
+ client = MongoClient(server.uri, username="username", password="password")
+
+ self.addCleanup(client.close)
+
+ # New monitoring connections send data during handshake.
+ heartbeat = server.receives("ismaster")
+ heartbeat.ok(primary_response)
+
+ future = go(client.db.command, "whatever")
+ for request in server:
+ if request.matches("ismaster"):
+ if request.client_port == heartbeat.client_port:
+ # This is the monitor again, keep going.
+ request.ok(primary_response)
+ else:
+ # Handshaking a new application socket should send
+ # saslSupportedMechs and speculativeAuthenticate.
+ self.assertEqual(request["saslSupportedMechs"], "admin.username")
+ self.assertIn("saslStart", request["speculativeAuthenticate"])
+ auth = {
+ "conversationId": 1,
+ "done": False,
+ "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0"
+ b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky"
+ b"tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei"
+ b"PHrSmh4uhkg==,i=15000",
+ }
+ request.ok(
+ "ismaster",
+ True,
+ saslSupportedMechs=["SCRAM-SHA-256"],
+ speculativeAuthenticate=auth,
+ minWireVersion=2,
+ maxWireVersion=6,
+ )
+ # Authentication should immediately fail with:
+ # OperationFailure: Server returned an invalid nonce.
+ with self.assertRaises(OperationFailure):
+ future()
+ return
+
+ def test_handshake_load_balanced(self):
+ self.hello_with_option_helper(OpMsg, loadBalanced=True)
+ with self.assertRaisesRegex(AssertionError, "does not match"):
+ self.hello_with_option_helper(Command, loadBalanced=True)
+
+ def test_handshake_versioned_api(self):
+ self.hello_with_option_helper(OpMsg, apiVersion="1")
+ with self.assertRaisesRegex(AssertionError, "does not match"):
+ self.hello_with_option_helper(Command, apiVersion="1")
+
+ def test_handshake_not_either(self):
+ # If we specify neither option, the driver should use OP_QUERY for
+ # the initial step of the handshake.
+ self.hello_with_option_helper(Command)
+ with self.assertRaisesRegex(AssertionError, "does not match"):
+ self.hello_with_option_helper(OpMsg)
+
+ def test_handshake_max_wire(self):
+ server = MockupDB()
+ primary_response = {"hello": 1, "ok": 1, "minWireVersion": 0, "maxWireVersion": 6}
+ self.found_auth_msg = False
+
+ def responder(request):
+ if request.matches(OpMsg, saslStart=1):
+ self.found_auth_msg = True
+ # Reply so that authentication fails immediately with
+ # OperationFailure: Server returned an invalid nonce.
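+ # The payload below resembles a SCRAM server-first message
+ # (r=<nonce>,s=<salt>,i=<iterations>), but its nonce cannot match the
+ # client's randomly generated nonce, so the driver rejects it.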
+ request.reply( + OpMsgReply( + **primary_response, + payload=b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r," + b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + saslSupportedMechs=["SCRAM-SHA-1"], + ) + ) + return None + else: + return request.reply(**primary_response) + + server.autoresponds(responder) + self.addCleanup(server.stop) + server.run() + client = MongoClient( + server.uri, + username="username", + password="password", + ) + self.addCleanup(client.close) + self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) + self.assertTrue( + self.found_auth_msg, "Could not find authentication command with correct protocol" + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py new file mode 100644 index 0000000000..97864dd257 --- /dev/null +++ b/test/mockupdb/test_initial_ismaster.py @@ -0,0 +1,45 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import time +import unittest + +from mockupdb import MockupDB, wait_until + +from pymongo import MongoClient + + +class TestInitialIsMaster(unittest.TestCase): + def test_initial_ismaster(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + start = time.time() + client = MongoClient(server.uri) + self.addCleanup(client.close) + + # A single ismaster is enough for the client to be connected. + self.assertFalse(client.nodes) + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + wait_until(lambda: client.nodes, "update nodes", timeout=1) + + # At least 10 seconds before next heartbeat. + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + self.assertGreaterEqual(time.time() - start, 10) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py new file mode 100644 index 0000000000..163c25c37b --- /dev/null +++ b/test/mockupdb/test_list_indexes.py @@ -0,0 +1,48 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test list_indexes with more than one batch.""" +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, going + +from bson import SON +from pymongo import MongoClient + + +class TestListIndexes(unittest.TestCase): + def test_list_indexes_command(self): + server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives(listIndexes="collection", namespace="test") + request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(getMore=123, namespace="test", collection="collection") + + request.reply({"cursor": {"nextBatch": [{"name": "index_1"}], "id": 0}}) + + self.assertEqual([{"name": "index_0"}, {"name": "index_1"}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py new file mode 100644 index 0000000000..88a3c13e63 --- /dev/null +++ b/test/mockupdb/test_max_staleness.py @@ -0,0 +1,66 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, going + +from pymongo import MongoClient + + +class TestMaxStalenessMongos(unittest.TestCase): + def test_mongos(self): + mongos = MockupDB() + mongos.autoresponds("ismaster", maxWireVersion=6, ismaster=True, msg="isdbgrid") + mongos.run() + self.addCleanup(mongos.stop) + + # No maxStalenessSeconds. + uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) + + self.assertTrue(request.slave_okay) + request.ok(cursor={"firstBatch": [], "id": 0}) + + # find_one succeeds with no result. + self.assertIsNone(future()) + + # Set maxStalenessSeconds to 1. Client has no minimum with mongos, + # we let mongos enforce the 90-second minimum and return an error: + # SERVER-27146. 
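+ # The driver forwards the value verbatim inside $readPreference, e.g.
+ # {"mode": "secondary", "maxStalenessSeconds": 1}, as the assertion
+ # below verifies; a real mongos would answer with an error.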
+ uri = ( + "mongodb://localhost:%d/?readPreference=secondary" + "&maxStalenessSeconds=1" % mongos.port + ) + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) + + self.assertTrue(request.slave_okay) + request.ok(cursor={"firstBatch": [], "id": 0}) + + self.assertIsNone(future()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py new file mode 100644 index 0000000000..515c279879 --- /dev/null +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -0,0 +1,91 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo with a mixed-version cluster.""" +from __future__ import annotations + +import time +import unittest +from queue import Queue + +from mockupdb import MockupDB, go +from operations import upgrades # type: ignore[import] + +from pymongo import MongoClient + + +class TestMixedVersionSharded(unittest.TestCase): + def setup_server(self, upgrade): + self.mongos_old, self.mongos_new = MockupDB(), MockupDB() + + # Collect queries to either server in one queue. + self.q: Queue = Queue() + for server in self.mongos_old, self.mongos_new: + server.subscribe(self.q.put) + server.autoresponds("getlasterror") + server.run() + self.addCleanup(server.stop) + + # Max wire version is too old for the upgraded operation. + self.mongos_old.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version - 1 + ) + + # Up-to-date max wire version. 
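+ # The generated tests below expect the client to send upgrade.new to
+ # this mongos and upgrade.old to mongos_old, depending on which server
+ # is selected for each attempt.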
+ self.mongos_new.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version + ) + + self.mongoses_uri = "mongodb://{},{}".format( + self.mongos_old.address_string, + self.mongos_new.address_string, + ) + + self.client = MongoClient(self.mongoses_uri) + + def tearDown(self): + if hasattr(self, "client") and self.client: + self.client.close() + + +def create_mixed_version_sharded_test(upgrade): + def test(self): + self.setup_server(upgrade) + start = time.time() + servers_used: set = set() + while len(servers_used) < 2: + go(upgrade.function, self.client) + request = self.q.get(timeout=1) + servers_used.add(request.server) + request.assert_matches( + upgrade.old if request.server is self.mongos_old else upgrade.new + ) + if time.time() > start + 10: + self.fail("never used both mongoses") + + return test + + +def generate_mixed_version_sharded_tests(): + for upgrade in upgrades: + test = create_mixed_version_sharded_test(upgrade) + test_name = "test_%s" % upgrade.name.replace(" ", "_") + test.__name__ = test_name + setattr(TestMixedVersionSharded, test_name, test) + + +generate_mixed_version_sharded_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py new file mode 100644 index 0000000000..dff1288e67 --- /dev/null +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -0,0 +1,123 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import itertools +import unittest + +from mockupdb import MockupDB, OpMsg, going +from operations import operations # type: ignore[import] + +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + + +class TestMongosCommandReadMode(unittest.TestCase): + def test_aggregate(self): + server = MockupDB() + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) + self.addCleanup(server.stop) + server.run() + + client = MongoClient(server.uri) + self.addCleanup(client.close) + collection = client.test.collection + with going(collection.aggregate, []): + command = server.receives(aggregate="collection", pipeline=[]) + self.assertFalse(command.slave_ok, "SlaveOkay set") + command.ok(result=[{}]) + + secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) + + with going(secondary_collection.aggregate, []): + + command = server.receives( + OpMsg( + { + "aggregate": "collection", + "pipeline": [], + "$readPreference": {"mode": "secondary"}, + } + ) + ) + command.ok(result=[{}]) + self.assertTrue(command.slave_ok, "SlaveOkay not set") + + +def create_mongos_read_mode_test(mode, operation): + def test(self): + server = MockupDB() + self.addCleanup(server.stop) + server.run() + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) + + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = MongoClient(server.uri, read_preference=pref) + self.addCleanup(client.close) + + with going(operation.function, client): + request = server.receive() + request.reply(operation.reply) + + if operation.op_type == "always-use-secondary": + self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get("$readPreference")) + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": + slave_ok = False + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + actual_pref = request.doc.get("$readPreference") + if mode == "primary": + self.assertIsNone(actual_pref) + else: + self.assertEqual(pref.document, actual_pref) + else: + self.fail("unrecognized op_type %r" % operation.op_type) + + if slave_ok: + self.assertTrue(request.slave_ok, "SlaveOkay not set") + else: + self.assertFalse(request.slave_ok, "SlaveOkay set") + + return test + + +def generate_mongos_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + if mode == "primary" and operation.op_type == "always-use-secondary": + # Skip something like command('foo', read_preference=SECONDARY). + continue + test = create_mongos_read_mode_test(mode, operation) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) + test.__name__ = test_name + setattr(TestMongosCommandReadMode, test_name, test) + + +generate_mongos_read_mode_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py new file mode 100644 index 0000000000..d05cfb531a --- /dev/null +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -0,0 +1,85 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +from mockupdb import Future, MockupDB, OpReply, going, wait_until + +from pymongo import MongoClient +from pymongo.errors import ConnectionFailure +from pymongo.topology_description import TOPOLOGY_TYPE + + +class TestNetworkDisconnectPrimary(unittest.TestCase): + def test_network_disconnect_primary(self): + # Application operation fails against primary. Test that topology + # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. + # http://bit.ly/1B5ttuL + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply( + ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + primary.autoresponds("ismaster", primary_response) + secondary.autoresponds( + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient(primary.uri, replicaSet="rs") + self.addCleanup(client.close) + wait_until(lambda: client.primary == primary.address, "discover primary") + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) + + # Open a socket in the application pool (calls ismaster). + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").ok() + + # The primary hangs replying to ismaster. + ismaster_future = Future() + primary.autoresponds("ismaster", lambda r: r.ok(ismaster_future.result())) + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").hangup() + + # Topology type is updated. + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, topology.description.topology_type) + + # Let ismasters through again. + ismaster_future.set_result(primary_response) + + # Demand a primary. + with going(client.db.command, "buildinfo"): + wait_until(lambda: client.primary == primary.address, "rediscover primary") + primary.receives("buildinfo").ok() + + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py new file mode 100644 index 0000000000..dd95254967 --- /dev/null +++ b/test/mockupdb/test_op_msg.py @@ -0,0 +1,317 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
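+
+# Each Operation below pairs a collection method with the OP_MSG request we
+# expect PyMongo to send and the reply the mock server should make. As the
+# table encodes, w=0 writes that can be fired and forgotten (single-statement
+# writes and unordered batches) set the moreToCome flag and expect no reply,
+# while ordered w=0 batches are still sent as normal request/reply exchanges.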
+from __future__ import annotations
+
+import unittest
+from collections import namedtuple
+
+from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going
+
+from pymongo import MongoClient, WriteConcern
+from pymongo.cursor import CursorType
+from pymongo.operations import DeleteOne, InsertOne, UpdateOne
+
+Operation = namedtuple("Operation", ["name", "function", "request", "reply"])
+
+operations = [
+ Operation(
+ "find_one",
+ lambda coll: coll.find_one({}),
+ request=OpMsg({"find": "coll"}, flags=0),
+ reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}},
+ ),
+ Operation(
+ "aggregate",
+ lambda coll: coll.aggregate([]),
+ request=OpMsg({"aggregate": "coll"}, flags=0),
+ reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}},
+ ),
+ Operation(
+ "insert_one",
+ lambda coll: coll.insert_one({}),
+ request=OpMsg({"insert": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1},
+ ),
+ Operation(
+ "insert_one-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}),
+ request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "insert_many",
+ lambda coll: coll.insert_many([{}, {}, {}]),
+ request=OpMsg({"insert": "coll"}, flags=0),
+ reply={"ok": 1, "n": 3},
+ ),
+ Operation(
+ "insert_many-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]),
+ request=OpMsg({"insert": "coll"}, flags=0),
+ reply={"ok": 1, "n": 3},
+ ),
+ Operation(
+ "insert_many-w0-unordered",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many(
+ [{}, {}, {}], ordered=False
+ ),
+ request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "replace_one",
+ lambda coll: coll.replace_one({"_id": 1}, {"new": 1}),
+ request=OpMsg({"update": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1, "nModified": 1},
+ ),
+ Operation(
+ "replace_one-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one(
+ {"_id": 1}, {"new": 1}
+ ),
+ request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "update_one",
+ lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}),
+ request=OpMsg({"update": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1, "nModified": 1},
+ ),
+ Operation(
+ "update_one-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one(
+ {"_id": 1}, {"$set": {"new": 1}}
+ ),
+ request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "update_many",
+ lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}),
+ request=OpMsg({"update": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1, "nModified": 1},
+ ),
+ Operation(
+ "update_many-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many(
+ {"_id": 1}, {"$set": {"new": 1}}
+ ),
+ request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "delete_one",
+ lambda coll: coll.delete_one({"a": 1}),
+ request=OpMsg({"delete": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1},
+ ),
+ Operation(
+ "delete_one-w0",
+ lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}),
+ request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+ reply=None,
+ ),
+ Operation(
+ "delete_many",
+ lambda coll: coll.delete_many({"a": 1}),
+ request=OpMsg({"delete": "coll"}, flags=0),
+ reply={"ok": 1, "n": 1},
+ ),
+ Operation(
+ "delete_many-w0",
+ lambda coll:
coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + # Legacy methods + Operation( + "bulk_write_insert", + lambda coll: coll.bulk_write([InsertOne[dict]({}), InsertOne[dict]({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_insert-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne[dict]({}), InsertOne[dict]({})] + ), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_insert-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne[dict]({}), InsertOne[dict]({})], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "bulk_write_update", + lambda coll: coll.bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), + Operation( + "bulk_write_update-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), + Operation( + "bulk_write_update-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ], + ordered=False, + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "bulk_write_delete", + lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_delete-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})] + ), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_delete-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False + ), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), +] + +operations_312 = [ + Operation( + "find_raw_batches", + lambda coll: list(coll.find_raw_batches({})), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), + Operation( + "aggregate_raw_batches", + lambda coll: list(coll.aggregate_raw_batches([])), + request=[ + OpMsg({"aggregate": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {"ok": 1, "cursor": {"firstBatch": [], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), + Operation( + "find_exhaust_cursor", + lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=1 << 16), + ], + reply=[ + OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + 
OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0), + ], + ), +] + + +class TestOpMsg(unittest.TestCase): + server: MockupDB + client: MongoClient + + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster=True, max_wire_version=8) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.server.stop() + cls.client.close() + + def _test_operation(self, op): + coll = self.client.db.coll + with going(op.function, coll) as future: + expected_requests = op.request + replies = op.reply + if not isinstance(op.request, list): + expected_requests = [op.request] + replies = [op.reply] + + for expected_request in expected_requests: + request = self.server.receives(expected_request) + reply = None + if replies: + reply = replies.pop(0) + if reply is not None: + request.reply(reply) + for reply in replies: + if reply is not None: + request.reply(reply) + + future() # No error. + + +def operation_test(op): + def test(self): + self._test_operation(op) + + return test + + +def create_tests(ops): + for op in ops: + test_name = f"test_op_msg_{op.name}" + setattr(TestOpMsg, test_name, operation_test(op)) + + +create_tests(operations) + +create_tests(operations_312) + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py new file mode 100644 index 0000000000..0fa7b84861 --- /dev/null +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -0,0 +1,195 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import copy +import itertools +import unittest +from typing import Any + +from mockupdb import CommandBase, MockupDB, going +from operations import operations # type: ignore[import] + +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + + +class OpMsgReadPrefBase(unittest.TestCase): + single_mongod = False + primary: MockupDB + secondary: MockupDB + + @classmethod + def setUpClass(cls): + super().setUpClass() + + @classmethod + def add_test(cls, mode, test_name, test): + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, read_preference=read_preference) + self.addCleanup(client.close) + return client + + +class TestOpMsgMongos(OpMsgReadPrefBase): + @classmethod + def setUpClass(cls): + super().setUpClass() + auto_ismaster = { + "ismaster": True, + "msg": "isdbgrid", # Mongos. 
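+ # isdbgrid is how PyMongo's monitor recognizes a mongos, so these
+ # tests exercise the sharded read preference rules.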
+ "minWireVersion": 2, + "maxWireVersion": 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super().tearDownClass() + + +class TestOpMsgReplicaSet(OpMsgReadPrefBase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.primary, cls.secondary = MockupDB(), MockupDB() + for server in cls.primary, cls.secondary: + server.run() + + hosts = [server.address_string for server in (cls.primary, cls.secondary)] + + primary_ismaster = { + "ismaster": True, + "setName": "rs", + "hosts": hosts, + "minWireVersion": 2, + "maxWireVersion": 6, + } + cls.primary.autoresponds(CommandBase("ismaster"), primary_ismaster) + secondary_ismaster = copy.copy(primary_ismaster) + secondary_ismaster["ismaster"] = False + secondary_ismaster["secondary"] = True + cls.secondary.autoresponds(CommandBase("ismaster"), secondary_ismaster) + + @classmethod + def tearDownClass(cls): + for server in cls.primary, cls.secondary: + server.stop() + super().tearDownClass() + + @classmethod + def add_test(cls, mode, test_name, test): + # Skip nearest tests since we don't know if we will select the primary + # or secondary. + if mode != "nearest": + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, replicaSet="rs", read_preference=read_preference) + + # Run a command on a secondary to discover the topology. This ensures + # that secondaryPreferred commands will select the secondary. + client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) + self.addCleanup(client.close) + return client + + +class TestOpMsgSingle(OpMsgReadPrefBase): + single_mongod = True + + @classmethod + def setUpClass(cls): + super().setUpClass() + auto_ismaster = { + "ismaster": True, + "minWireVersion": 2, + "maxWireVersion": 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super().tearDownClass() + + +def create_op_msg_read_mode_test(mode, operation): + def test(self): + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = self.setup_client(read_preference=pref) + expected_pref: Any + if operation.op_type == "always-use-secondary": + expected_server = self.secondary + expected_pref = ReadPreference.SECONDARY + elif operation.op_type == "must-use-primary": + expected_server = self.primary + expected_pref = None + elif operation.op_type == "may-use-secondary": + if mode == "primary": + expected_server = self.primary + expected_pref = None + elif mode == "primaryPreferred": + expected_server = self.primary + expected_pref = pref + else: + expected_server = self.secondary + expected_pref = pref + else: + self.fail("unrecognized op_type %r" % operation.op_type) + # For single mongod we omit the read preference. 
+ if self.single_mongod: + expected_pref = None + with going(operation.function, client): + request = expected_server.receive() + request.reply(operation.reply) + + actual_pref = request.doc.get("$readPreference") + if expected_pref: + self.assertEqual(expected_pref.document, actual_pref) + else: + self.assertIsNone(actual_pref) + self.assertNotIn("$query", request.doc) + + return test + + +def generate_op_msg_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + test = create_op_msg_read_mode_test(mode, operation) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) + test.__name__ = test_name + for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: + cls.add_test(mode, test_name, test) + + +generate_op_msg_read_mode_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py new file mode 100644 index 0000000000..5297709886 --- /dev/null +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -0,0 +1,74 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo query and read preference with a sharded cluster.""" +from __future__ import annotations + +import unittest + +from mockupdb import MockupDB, OpMsg, going + +from bson import SON +from pymongo import MongoClient +from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + Secondary, + SecondaryPreferred, +) + + +class TestQueryAndReadModeSharded(unittest.TestCase): + def test_query_and_read_mode_sharded_op_msg(self): + """Test OP_MSG sends non-primary $readPreference and never $query.""" + server = MockupDB() + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + read_prefs = ( + Primary(), + SecondaryPreferred(), + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{"tag": "value"}]), + ) + + for query in ( + {"a": 1}, + {"$query": {"a": 1}}, + ): + for pref in read_prefs: + collection = client.db.get_collection("test", read_preference=pref) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + # Command is not nested in $query. + expected_cmd = SON([("find", "test"), ("filter", {"a": 1})]) + if pref.mode: + expected_cmd["$readPreference"] = pref.document + request.assert_matches(OpMsg(expected_cmd)) + + request.replies({"cursor": {"id": 0, "firstBatch": [{}]}}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py new file mode 100644 index 0000000000..12c0bec9ac --- /dev/null +++ b/test/mockupdb/test_reset_and_request_check.py @@ -0,0 +1,155 @@ +# Copyright 2015 MongoDB, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import itertools
+import time
+import unittest
+
+from mockupdb import MockupDB, going, wait_until
+from operations import operations # type: ignore[import]
+
+from pymongo import MongoClient
+from pymongo.errors import ConnectionFailure
+from pymongo.server_type import SERVER_TYPE
+
+
+class TestResetAndRequestCheck(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.ismaster_time = 0.0
+ self.client = None
+ self.server = None
+
+ def setup_server(self):
+ self.server = MockupDB()
+
+ def responder(request):
+ self.ismaster_time = time.time()
+ return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6)
+
+ self.server.autoresponds("ismaster", responder)
+ self.server.run()
+ self.addCleanup(self.server.stop)
+
+ kwargs = {"socketTimeoutMS": 100}
+ # Disable retryable reads now that pymongo supports them.
+ kwargs["retryReads"] = False
+ self.client = MongoClient(self.server.uri, **kwargs) # type: ignore
+ wait_until(lambda: self.client.nodes, "connect to standalone")
+
+ def tearDown(self):
+ if hasattr(self, "client") and self.client:
+ self.client.close()
+
+ def _test_disconnect(self, operation):
+ # Application operation fails. Test that client resets server
+ # description and does *not* schedule immediate check.
+ self.setup_server()
+ assert self.server is not None
+ assert self.client is not None
+
+ # Network error on application operation.
+ with self.assertRaises(ConnectionFailure):
+ with going(operation.function, self.client):
+ self.server.receives().hangup()
+
+ # Server is Unknown.
+ topology = self.client._topology
+ with self.assertRaises(ConnectionFailure):
+ topology.select_server_by_address(self.server.address, 0)
+
+ time.sleep(0.5)
+ after = time.time()
+
+ # Demand a reconnect.
+ with going(self.client.db.command, "buildinfo"):
+ self.server.receives("buildinfo").ok()
+
+ last = self.ismaster_time
+ self.assertGreaterEqual(last, after, "called ismaster before needed")
+
+ def _test_timeout(self, operation):
+ # Application operation times out. Test that client does *not* reset
+ # server description and does *not* schedule immediate check.
+ self.setup_server()
+ assert self.server is not None
+ assert self.client is not None
+
+ with self.assertRaises(ConnectionFailure):
+ with going(operation.function, self.client):
+ self.server.receives()
+ before = self.ismaster_time
+ time.sleep(0.5)
+
+ # Server is *not* Unknown.
+ topology = self.client._topology
+ server = topology.select_server_by_address(self.server.address, 0)
+ assert server is not None
+ self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type)
+
+ after = self.ismaster_time
+ self.assertEqual(after, before, "unneeded ismaster call")
+
+ def _test_not_master(self, operation):
+ # Application operation gets a "not master" error.
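+ # Unlike a plain network error, a "not master" reply should mark the
+ # server Unknown *and* request an immediate re-check, so ismaster is
+ # called again promptly (asserted via self.ismaster_time below).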
+ self.setup_server() + assert self.server is not None + assert self.client is not None + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + request = self.server.receives() + before = self.ismaster_time + request.replies(operation.not_master) + time.sleep(1) + + # Server is rediscovered. + topology = self.client._topology + server = topology.select_server_by_address(self.server.address, 0) + assert server is not None + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertGreater(after, before, "ismaster not called") + + +def create_reset_test(operation, test_method): + def test(self): + test_method(self, operation) + + return test + + +def generate_reset_tests(): + test_methods = [ + (TestResetAndRequestCheck._test_disconnect, "test_disconnect"), + (TestResetAndRequestCheck._test_timeout, "test_timeout"), + (TestResetAndRequestCheck._test_not_master, "test_not_master"), + ] + + matrix = itertools.product(operations, test_methods) + + for entry in matrix: + operation, (test_method, name) = entry + test = create_reset_test(operation, test_method) + test_name = "{}_{}".format(name, operation.name.replace(" ", "_")) + test.__name__ = test_name + setattr(TestResetAndRequestCheck, test_name, test) + + +generate_reset_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py new file mode 100644 index 0000000000..c9f2b89f07 --- /dev/null +++ b/test/mockupdb/test_rsghost.py @@ -0,0 +1,60 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test connections to RSGhost nodes.""" +from __future__ import annotations + +import datetime +import unittest + +from mockupdb import MockupDB, going + +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + + +class TestRSGhost(unittest.TestCase): + def test_rsghost(self): + rsother_response = { + "ok": 1.0, + "ismaster": False, + "secondary": False, + "info": "Does not have a valid replica set config", + "isreplicaset": True, + "maxBsonObjectSize": 16777216, + "maxMessageSizeBytes": 48000000, + "maxWriteBatchSize": 100000, + "localTime": datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + "logicalSessionTimeoutMinutes": 30, + "connectionId": 3, + "minWireVersion": 0, + "maxWireVersion": 15, + "readOnly": False, + } + server = MockupDB(auto_ismaster=rsother_response) + server.run() + self.addCleanup(server.stop) + # Default auto discovery yields a server selection timeout. + with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: + with self.assertRaises(ServerSelectionTimeoutError): + client.test.command("ping") + # Direct connection succeeds. 
+ with MongoClient(server.uri, directConnection=True) as client:
+ with going(client.test.command, "ping"):
+ request = server.receives(ping=1)
+ request.reply()
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py
new file mode 100644
index 0000000000..ba5b976d6b
--- /dev/null
+++ b/test/mockupdb/test_slave_okay_rs.py
@@ -0,0 +1,84 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test PyMongo's SlaveOkay with a replica set connection.
+
+Just make sure SlaveOkay is *not* set on primary reads.
+"""
+from __future__ import annotations
+
+import unittest
+
+from mockupdb import MockupDB, going
+from operations import operations # type: ignore[import]
+
+from pymongo import MongoClient
+
+
+class TestSlaveOkayRS(unittest.TestCase):
+ def setup_server(self):
+ self.primary, self.secondary = MockupDB(), MockupDB()
+ for server in self.primary, self.secondary:
+ server.run()
+ self.addCleanup(server.stop)
+
+ hosts = [server.address_string for server in (self.primary, self.secondary)]
+ self.primary.autoresponds(
+ "ismaster", ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6
+ )
+ self.secondary.autoresponds(
+ "ismaster",
+ ismaster=False,
+ secondary=True,
+ setName="rs",
+ hosts=hosts,
+ minWireVersion=2,
+ maxWireVersion=6,
+ )
+
+
+def create_slave_ok_rs_test(operation):
+ def test(self):
+ self.setup_server()
+ assert operation.op_type != "always-use-secondary"
+
+ client = MongoClient(self.primary.uri, replicaSet="rs")
+ self.addCleanup(client.close)
+ with going(operation.function, client):
+ request = self.primary.receive()
+ request.reply(operation.reply)
+
+ self.assertFalse(request.slave_ok, 'SlaveOkay set with read mode "primary"')
+
+ return test
+
+
+def generate_slave_ok_rs_tests():
+ for operation in operations:
+ # Don't test secondary operations with MockupDB; the real server
+ # enforces the SlaveOkay bit, so integration tests prove we set it.
+ if operation.op_type == "always-use-secondary":
+ continue
+ test = create_slave_ok_rs_test(operation)
+
+ test_name = "test_%s" % operation.name.replace(" ", "_")
+ test.__name__ = test_name
+ setattr(TestSlaveOkayRS, test_name, test)
+
+
+generate_slave_ok_rs_tests()
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py
new file mode 100644
index 0000000000..45b7d51ba0
--- /dev/null
+++ b/test/mockupdb/test_slave_okay_sharded.py
@@ -0,0 +1,95 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test PyMongo's SlaveOkay with a sharded cluster of two mongoses."""
+from __future__ import annotations
+
+import itertools
+import unittest
+from queue import Queue
+
+from mockupdb import MockupDB, going
+from operations import operations # type: ignore[import]
+
+from pymongo import MongoClient
+from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name
+
+
+class TestSlaveOkaySharded(unittest.TestCase):
+ def setup_server(self):
+ self.mongos1, self.mongos2 = MockupDB(), MockupDB()
+
+ # Collect queries to either server in one queue.
+ self.q: Queue = Queue()
+ for server in self.mongos1, self.mongos2:
+ server.subscribe(self.q.put)
+ server.run()
+ self.addCleanup(server.stop)
+ server.autoresponds(
+ "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid"
+ )
+
+ self.mongoses_uri = f"mongodb://{self.mongos1.address_string},{self.mongos2.address_string}"
+
+
+def create_slave_ok_sharded_test(mode, operation):
+ def test(self):
+ self.setup_server()
+ if operation.op_type == "always-use-secondary":
+ slave_ok = True
+ elif operation.op_type == "may-use-secondary":
+ slave_ok = mode != "primary"
+ elif operation.op_type == "must-use-primary":
+ slave_ok = False
+ else:
+ raise AssertionError("unrecognized op_type %r" % operation.op_type)
+
+ pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None)
+
+ client = MongoClient(self.mongoses_uri, read_preference=pref)
+ self.addCleanup(client.close)
+ with going(operation.function, client):
+ request = self.q.get(timeout=1)
+ request.reply(operation.reply)
+
+ if slave_ok:
+ self.assertTrue(request.slave_ok, "SlaveOkay not set")
+ else:
+ self.assertFalse(request.slave_ok, "SlaveOkay set")
+
+ return test
+
+
+def generate_slave_ok_sharded_tests():
+ modes = "primary", "secondary", "nearest"
+ matrix = itertools.product(modes, operations)
+
+ for entry in matrix:
+ mode, operation = entry
+ test = create_slave_ok_sharded_test(mode, operation)
+ test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode)
+
+ test.__name__ = test_name
+ setattr(TestSlaveOkaySharded, test_name, test)
+
+
+generate_slave_ok_sharded_tests()
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py
new file mode 100644
index 0000000000..b03232807e
--- /dev/null
+++ b/test/mockupdb/test_slave_okay_single.py
@@ -0,0 +1,96 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Test PyMongo's SlaveOkay with: + +- A direct connection to a standalone. +- A direct connection to a slave. +- A direct connection to a mongos. +""" +from __future__ import annotations + +import itertools +import unittest + +from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] + +from pymongo import MongoClient +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.topology_description import TOPOLOGY_TYPE + + +def topology_type_name(client): + topology_type = client._topology._description.topology_type + return TOPOLOGY_TYPE._fields[topology_type] + + +class TestSlaveOkaySingle(unittest.TestCase): + def setUp(self): + self.server = MockupDB() + self.server.run() + self.addCleanup(self.server.stop) + + +def create_slave_ok_single_test(mode, server_type, ismaster, operation): + def test(self): + ismaster_with_version = ismaster.copy() + ismaster_with_version["minWireVersion"] = 2 + ismaster_with_version["maxWireVersion"] = 6 + self.server.autoresponds("ismaster", **ismaster_with_version) + self.assertIn( + operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary") + ) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = MongoClient(self.server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.server.receive() + request.reply(operation.reply) + + self.assertIn(topology_type_name(client), ["Sharded", "Single"]) + + return test + + +def generate_slave_ok_single_tests(): + modes = "primary", "secondary", "nearest" + server_types = [ + ("standalone", {"ismaster": True}), + ("slave", {"ismaster": False}), + ("mongos", {"ismaster": True, "msg": "isdbgrid"}), + ] + + matrix = itertools.product(modes, server_types, operations) + + for entry in matrix: + mode, (server_type, ismaster), operation = entry + test = create_slave_ok_single_test(mode, server_type, ismaster, operation) + + test_name = "test_{}_{}_with_mode_{}".format( + operation.name.replace(" ", "_"), + server_type, + mode, + ) + + test.__name__ = test_name + setattr(TestSlaveOkaySingle, test_name, test) + + +generate_slave_ok_single_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mod_wsgi_test/README.rst b/test/mod_wsgi_test/README.rst index 2ea50c9074..2c204f7ac5 100644 --- a/test/mod_wsgi_test/README.rst +++ b/test/mod_wsgi_test/README.rst @@ -15,7 +15,7 @@ Test Matrix PyMongo should be tested with several versions of mod_wsgi and a selection of Python versions. Each combination of mod_wsgi and Python version should -be tested with a standalone and a replica set. ``mod_wsgi_test.wsgi`` +be tested with a standalone and a replica set. ``mod_wsgi_test.py`` detects if the deployment is a replica set and connects to the whole set. Setup @@ -74,31 +74,37 @@ Run the test Run the included ``test_client.py`` script:: python test/mod_wsgi_test/test_client.py -n 2500 -t 100 parallel \ - http://localhost/${WORKSPACE} + http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE} ...where the "n" argument is the total number of requests to make to Apache, and "t" specifies the number of threads. ``WORKSPACE`` is the location of -the PyMongo checkout. +the PyMongo checkout. Note that multiple URLs are passed, each one corresponds +to a different sub interpreter. 
Run this script again with different arguments to make serial requests:: python test/mod_wsgi_test/test_client.py -n 25000 serial \ - http://localhost/${WORKSPACE} + http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE} The ``test_client.py`` script merely makes HTTP requests to Apache. Its exit code is non-zero if any of its requests fails, for example with an HTTP 500. -The core of the test is in the WSGI script, ``mod_wsgi_test.wsgi``. +The core of the test is in the WSGI script, ``mod_wsgi_test.py``. This script inserts some documents into MongoDB at startup, then queries documents for each HTTP request. If PyMongo is leaking connections and "n" is much greater than the ulimit, the test will fail when PyMongo exhausts its file descriptors. +The script also encodes and decodes all BSON types to ensure that +multiple sub interpreters in the same process are supported. This tests +the workaround added in `PYTHON-569 `_. + Automation ---------- At MongoDB, Inc. we use a continuous integration job that tests each combination in the matrix. The job starts up Apache, starts a single server or replica set, and runs ``test_client.py`` with the proper arguments. +See `run-mod-wsgi-tests.sh `_ diff --git a/test/mod_wsgi_test/apache22amazon.conf b/test/mod_wsgi_test/apache22amazon.conf index d9d892bbe2..7755336b07 100644 --- a/test/mod_wsgi_test/apache22amazon.conf +++ b/test/mod_wsgi_test/apache22amazon.conf @@ -31,4 +31,4 @@ CustomLog ${PWD}/access_log combined Allow from All -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/apache22ubuntu1204.conf b/test/mod_wsgi_test/apache22ubuntu1204.conf index 281c5862c2..9fa4b2060b 100644 --- a/test/mod_wsgi_test/apache22ubuntu1204.conf +++ b/test/mod_wsgi_test/apache22ubuntu1204.conf @@ -26,4 +26,4 @@ CustomLog ${PWD}/access_log combined Allow from All -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/apache24ubuntu161404.conf b/test/mod_wsgi_test/apache24ubuntu161404.conf index f0a1734375..eb5414f0f7 100644 --- a/test/mod_wsgi_test/apache24ubuntu161404.conf +++ b/test/mod_wsgi_test/apache24ubuntu161404.conf @@ -25,4 +25,4 @@ CustomLog ${PWD}/access_log combined Require all granted -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/mod_wsgi_test.conf b/test/mod_wsgi_test/mod_wsgi_test.conf index 9505933e96..a5b09e437f 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.conf +++ b/test/mod_wsgi_test/mod_wsgi_test.conf @@ -1,4 +1,4 @@ -# Copyright 2012-2015 MongoDB, Inc. +# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,17 +20,13 @@ LoadModule wsgi_module ${MOD_WSGI_SO} WSGISocketPrefix /tmp/ - ServerName localhost - WSGIDaemonProcess mod_wsgi_test processes=1 threads=15 display-name=mod_wsgi_test - WSGIProcessGroup mod_wsgi_test - - # For the convienience of unittests, rather than hard-code the location of - # mod_wsgi_test.wsgi, include it in the URL, so - # http://localhost/location-of-pymongo-checkout will work: - - WSGIScriptAliasMatch ^/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.wsgi - + # Mount the script twice so that multiple interpreters are used. 
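+ # mod_wsgi derives the application group from the mount point by default,
+ # so the two aliases below run mod_wsgi_test.py in two separate sub
+ # interpreters within the same daemon process.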
+ # For the convenience of unittests, rather than hard-code the location of + # mod_wsgi_test.py, include it in the URL, so + # http://localhost/interpreter1/location-of-pymongo-checkout will work: + WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py + WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py diff --git a/test/mod_wsgi_test/mod_wsgi_test.py b/test/mod_wsgi_test/mod_wsgi_test.py new file mode 100644 index 0000000000..c5f5c3086a --- /dev/null +++ b/test/mod_wsgi_test/mod_wsgi_test.py @@ -0,0 +1,111 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353 +""" +from __future__ import annotations + +import datetime +import os +import re +import sys +import uuid + +this_path = os.path.dirname(os.path.join(os.getcwd(), __file__)) + +# Location of PyMongo checkout +repository_path = os.path.normpath(os.path.join(this_path, "..", "..")) +sys.path.insert(0, repository_path) + +import bson +import pymongo +from bson.binary import STANDARD, Binary +from bson.code import Code +from bson.codec_options import CodecOptions +from bson.datetime_ms import DatetimeConversion, DatetimeMS +from bson.dbref import DBRef +from bson.objectid import ObjectId +from bson.regex import Regex +from pymongo.mongo_client import MongoClient + +# Ensure the C extensions are installed. +assert bson.has_c() +assert pymongo.has_c() + +OPTS: CodecOptions[dict] = CodecOptions( + uuid_representation=STANDARD, datetime_conversion=DatetimeConversion.DATETIME_AUTO +) +client: MongoClient[dict] = MongoClient() +# Use a unique collection name for each process: +coll_name = f"test-{uuid.uuid4()}" +collection = client.test.get_collection(coll_name, codec_options=OPTS) +ndocs = 20 +collection.drop() +doc = { + "int32": 2 << 15, + "int64": 2 << 50, + "null": None, + "bool": True, + "float": 1.5, + "str": "string", + "list": [1, 2, 3], + "dict": {"a": 1, "b": 2, "c": 3}, + "datetime": datetime.datetime.fromtimestamp(1690328577.446), + "datetime_ms_out_of_range": DatetimeMS(-2 << 60), + "regex_native": re.compile("regex*"), + "regex_pymongo": Regex("regex*"), + "binary": Binary(b"bytes", 128), + "oid": ObjectId(), + "dbref": DBRef("test", 1), + "code": Code("function(){ return true; }"), + "code_w_scope": Code("return function(){ return x; }", scope={"x": False}), + "bytes": b"bytes", + "uuid": uuid.uuid4(), +} +collection.insert_many([dict(i=i, **doc) for i in range(ndocs)]) +client.close() # Discard main thread's request socket. 
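+# The application below re-creates the client; connections are opened
+# lazily, so request handling uses sockets of its own rather than the one
+# the setup code above just closed.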
+client = MongoClient()
+collection = client.test.get_collection(coll_name, codec_options=OPTS)
+
+try:
+    from mod_wsgi import version as mod_wsgi_version  # type: ignore[import]
+except ImportError:
+    mod_wsgi_version = None
+
+
+def application(environ, start_response):
+    results = list(collection.find().batch_size(10))
+    assert len(results) == ndocs, f"n_actual={len(results)} n_expected={ndocs}"
+    # Test encoding and decoding works (for sub interpreter support).
+    decoded = bson.decode(bson.encode(doc, codec_options=OPTS), codec_options=OPTS)
+    for key, value in doc.items():
+        # Native regex objects are decoded as bson Regex.
+        if isinstance(value, re.Pattern):
+            value = Regex.from_native(value)
+        assert decoded[key] == value, f"failed on doc[{key!r}]: {decoded[key]!r} != {value!r}"
+        assert isinstance(
+            decoded[key], type(value)
+        ), f"failed on doc[{key}]: {decoded[key]!r} is not an instance of {type(value)}"
+
+    output = (
+        f" python {sys.version}, mod_wsgi {mod_wsgi_version},"
+        f" pymongo {pymongo.version},"
+        f' mod_wsgi.process_group = {environ["mod_wsgi.process_group"]!r}'
+        f' mod_wsgi.application_group = {environ["mod_wsgi.application_group"]!r}'
+        f' wsgi.multithread = {environ["wsgi.multithread"]!r}'
+        "\n"
+    )
+    response_headers = [("Content-Length", str(len(output)))]
+    start_response("200 OK", response_headers)
+    return [output.encode("ascii")]
diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi
deleted file mode 100644
index 9b435b1edf..0000000000
--- a/test/mod_wsgi_test/mod_wsgi_test.wsgi
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2012-2015 MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353
-"""
-
-import os
-import sys
-
-this_path = os.path.dirname(os.path.join(os.getcwd(), __file__))
-
-# Location of PyMongo checkout
-repository_path = os.path.normpath(os.path.join(this_path, '..', '..'))
-sys.path.insert(0, repository_path)
-
-import pymongo
-from pymongo.mongo_client import MongoClient
-
-client = MongoClient()
-
-# If the deployment is a replica set, connect to the whole set.
-replica_set_name = client.admin.command('ismaster').get('setName')
-if replica_set_name:
-    client = MongoClient(replicaSet=replica_set_name)
-
-collection = client.test.test
-
-ndocs = 20
-
-collection.drop()
-collection.insert_many([{'i': i} for i in range(ndocs)])
-client.close()  # Discard main thread's request socket.
-
-try:
-    from mod_wsgi import version as mod_wsgi_version
-except:
-    mod_wsgi_version = None
-
-
-def application(environ, start_response):
-    results = list(collection.find().batch_size(10))
-    assert len(results) == ndocs
-    output = ' python %s, mod_wsgi %s, pymongo %s ' % (
-        sys.version, mod_wsgi_version, pymongo.version)
-    response_headers = [('Content-Length', str(len(output)))]
-    start_response('200 OK', response_headers)
-    return [output.encode('ascii')]
diff --git a/test/mod_wsgi_test/mod_wsgi_test_embedded.conf b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf
new file mode 100644
index 0000000000..306dab4ab6
--- /dev/null
+++ b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf
@@ -0,0 +1,30 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Minimal test of PyMongo in an *Embedded mode* WSGI application.
+
+LoadModule wsgi_module ${MOD_WSGI_SO}
+
+# Avoid permissions issues
+WSGISocketPrefix /tmp/
+
+<VirtualHost *>
+    ServerName localhost
+    # Mount the script twice so that multiple interpreters are used.
+    # For the convenience of unittests, rather than hard-code the location of
+    # mod_wsgi_test.py, include it in the URL, so
+    # http://localhost/interpreter1/location-of-pymongo-checkout will work:
+    WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
+    WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
+</VirtualHost>
diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py
index 61cf8df674..63ae883473 100644
--- a/test/mod_wsgi_test/test_client.py
+++ b/test/mod_wsgi_test/test_client.py
@@ -12,70 +12,79 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Test client for mod_wsgi application, see bug PYTHON-353.
-"""
+"""Test client for mod_wsgi application, see bug PYTHON-353."""
+from __future__ import annotations
 
+import _thread as thread
+import random
 import sys
 import threading
 import time
-
 from optparse import OptionParser
-
-try:
-    from urllib2 import urlopen
-except ImportError:
-    # Python 3.
-    from urllib.request import urlopen
-
-
-try:
-    import thread
-except ImportError:
-    # Python 3.
-    import _thread as thread
+from urllib.request import urlopen
 
 
 def parse_args():
-    parser = OptionParser("""usage: %prog [options] mode url
+    parser = OptionParser(
+        """usage: %prog [options] mode url [...]
 
-    mode:\tparallel or serial""")
+    mode:\tparallel or serial"""
+    )
 
     # Should be enough that any connection leak will exhaust available file
    # descriptors.
     parser.add_option(
-        "-n", "--nrequests", type="int",
-        dest="nrequests", default=50 * 1000,
-        help="Number of times to GET the URL, in total")
+        "-n",
+        "--nrequests",
+        type="int",
+        dest="nrequests",
+        default=50 * 1000,
+        help="Number of times to GET the URLs, in total",
+    )
     parser.add_option(
-        "-t", "--nthreads", type="int",
-        dest="nthreads", default=100,
-        help="Number of threads with mode 'parallel'")
+        "-t",
+        "--nthreads",
+        type="int",
+        dest="nthreads",
+        default=100,
+        help="Number of threads with mode 'parallel'",
+    )
     parser.add_option(
-        "-q", "--quiet",
-        action="store_false", dest="verbose", default=True,
-        help="Don't print status messages to stdout")
+        "-q",
+        "--quiet",
+        action="store_false",
+        dest="verbose",
+        default=True,
+        help="Don't print status messages to stdout",
+    )
     parser.add_option(
-        "-c", "--continue",
-        action="store_true", dest="continue_", default=False,
-        help="Continue after HTTP errors")
+        "-c",
+        "--continue",
+        action="store_true",
+        dest="continue_",
+        default=False,
+        help="Continue after HTTP errors",
+    )
 
     try:
-        options, (mode, url) = parser.parse_args()
-    except ValueError:
+        options, args = parser.parse_args()
+        mode, urls = args[0], args[1:]
+    except (ValueError, IndexError):
         parser.print_usage()
         sys.exit(1)
 
-    if mode not in ('parallel', 'serial'):
+    if mode not in ("parallel", "serial"):
         parser.print_usage()
         sys.exit(1)
 
-    return options, mode, url
+    return options, mode, urls
 
 
-def get(url):
+def get(urls):
+    url = random.choice(urls)
     urlopen(url).read().strip()
 
 
@@ -84,17 +93,17 @@ class URLGetterThread(threading.Thread):
     counter_lock = threading.Lock()
     counter = 0
 
-    def __init__(self, options, url, nrequests_per_thread):
-        super(URLGetterThread, self).__init__()
+    def __init__(self, options, urls, nrequests_per_thread):
+        super().__init__()
         self.options = options
-        self.url = url
+        self.urls = urls
         self.nrequests_per_thread = nrequests_per_thread
         self.errors = 0
 
     def run(self):
-        for i in range(self.nrequests_per_thread):
+        for _i in range(self.nrequests_per_thread):
             try:
-                get(url)
+                get(self.urls)
             except Exception as e:
                 print(e)
 
@@ -114,21 +123,24 @@ def run(self):
             print(counter)
 
 
-def main(options, mode, url):
+def main(options, mode, urls):
     start_time = time.time()
     errors = 0
-    if mode == 'parallel':
+    if mode == "parallel":
         nrequests_per_thread = options.nrequests // options.nthreads
 
         if options.verbose:
-            print (
-                'Getting %s %s times total in %s threads, '
-                '%s times per thread' % (
-                    url, nrequests_per_thread * options.nthreads,
-                    options.nthreads, nrequests_per_thread))
+            print(
+                "Getting {} {} times total in {} threads, "
+                "{} times per thread".format(
+                    urls,
+                    nrequests_per_thread * options.nthreads,
+                    options.nthreads,
+                    nrequests_per_thread,
+                )
+            )
 
         threads = [
-            URLGetterThread(options, url, nrequests_per_thread)
-            for _ in range(options.nthreads)
+            URLGetterThread(options, urls, nrequests_per_thread) for _ in range(options.nthreads)
         ]
 
         for t in threads:
@@ -140,18 +152,15 @@ def main(options, mode, url):
         errors = sum([t.errors for t in threads])
         nthreads_with_errors = len([t for t in threads if t.errors])
         if nthreads_with_errors:
-            print('%d threads had errors! %d errors in total' % (
-                nthreads_with_errors, errors))
+            print("%d threads had errors! %d errors in total" % (nthreads_with_errors, errors))
     else:
-        assert mode == 'serial'
+        assert mode == "serial"
         if options.verbose:
-            print('Getting %s %s times in one thread' % (
-                url, options.nrequests
-            ))
+            print(f"Getting {urls} {options.nrequests} times in one thread")
 
         for i in range(1, options.nrequests + 1):
             try:
-                get(url)
+                get(urls)
             except Exception as e:
                 print(e)
                 if not options.continue_:
@@ -163,16 +172,16 @@ def main(options, mode, url):
             print(i)
 
     if errors:
-        print('%d errors!' % errors)
+        print("%d errors!" % errors)
 
     if options.verbose:
-        print('Completed in %.2f seconds' % (time.time() - start_time))
+        print("Completed in %.2f seconds" % (time.time() - start_time))
 
     if errors:
         # Failure
         sys.exit(1)
 
 
-if __name__ == '__main__':
-    options, mode, url = parse_args()
-    main(options, mode, url)
+if __name__ == "__main__":
+    options, mode, urls = parse_args()
+    main(options, mode, urls)
diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py
new file mode 100644
index 0000000000..5f9a2d45a9
--- /dev/null
+++ b/test/mypy_fails/insert_many_dict.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from pymongo import MongoClient
+
+client: MongoClient = MongoClient()
+client.test.test.insert_many(
+    {"a": 1}
+)  # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int"
diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py
new file mode 100644
index 0000000000..7c27d5cac9
--- /dev/null
+++ b/test/mypy_fails/insert_one_list.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from pymongo import MongoClient
+
+client: MongoClient = MongoClient()
+client.test.test.insert_one(
+    [{}]
+)  # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[<nothing>, <nothing>]]"; expected "Mapping[str, Any]"
diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py
new file mode 100644
index 0000000000..49f3659e90
--- /dev/null
+++ b/test/mypy_fails/raw_bson_document.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+from bson.raw_bson import RawBSONDocument
+from pymongo import MongoClient
+
+client = MongoClient(document_class=RawBSONDocument)
+coll = client.test.test
+doc = {"my": "doc"}
+coll.insert_one(doc)
+retrieved = coll.find_one({"_id": doc["_id"]})
+assert retrieved is not None
+assert len(retrieved.raw) > 0
+retrieved[
+    "foo"
+] = "bar"  # error: Unsupported target for indexed assignment ("RawBSONDocument") [index]
diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py
new file mode 100644
index 0000000000..37c3f0bfcc
--- /dev/null
+++ b/test/mypy_fails/typedict_client.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing import TypedDict
+
+from pymongo import MongoClient
+
+
+class Movie(TypedDict):
+    name: str
+    year: int
+
+
+client: MongoClient[Movie] = MongoClient()
+coll = client.test.test
+retrieved = coll.find_one({"_id": "foo"})
+assert retrieved is not None
+assert retrieved["year"] == 1
+assert (
+    retrieved["name"] == 2
+)  # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap]
diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py
new file mode 100644
index 0000000000..de2714cc00
--- /dev/null
+++ b/test/ocsp/test_ocsp.py
@@ -0,0 +1,76 @@
+# Copyright 2020-present MongoDB, Inc.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test OCSP.""" +from __future__ import annotations + +import logging +import os +import sys +import unittest + +sys.path[0:0] = [""] + +import pymongo +from pymongo.errors import ServerSelectionTimeoutError + +CA_FILE = os.environ.get("CA_FILE") +OCSP_TLS_SHOULD_SUCCEED = os.environ.get("OCSP_TLS_SHOULD_SUCCEED") == "true" + +# Enable logs in this format: +# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" +logging.basicConfig(format=FORMAT, level=logging.DEBUG) + +if sys.platform == "win32": + # The non-stapled OCSP endpoint check is slow on Windows. + TIMEOUT_MS = 5000 +else: + TIMEOUT_MS = 500 + + +def _connect(options): + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS={}&tlsCAFile={}&{}").format( + TIMEOUT_MS, + CA_FILE, + options, + ) + print(uri) + client = pymongo.MongoClient(uri) + client.admin.command("ping") + + +class TestOCSP(unittest.TestCase): + def test_tls_insecure(self): + # Should always succeed + options = "tls=true&tlsInsecure=true" + _connect(options) + + def test_allow_invalid_certificates(self): + # Should always succeed + options = "tls=true&tlsAllowInvalidCertificates=true" + _connect(options) + + def test_tls(self): + options = "tls=true" + if not OCSP_TLS_SHOULD_SUCCEED: + self.assertRaisesRegex( + ServerSelectionTimeoutError, "invalid status response", _connect, options + ) + else: + _connect(options) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 68baadecbc..2ad4edaf8f 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -13,61 +13,67 @@ # limitations under the License. 
"""Tests for the MongoDB Driver Performance Benchmarking Spec.""" +from __future__ import annotations import multiprocessing as mp import os import sys import tempfile +import time import warnings +from typing import Any, List try: import simplejson as json except ImportError: - import json + import json # type: ignore[no-redef] sys.path[0:0] = [""] +from test import client_context, host, port, unittest + from bson import decode, encode from bson.json_util import loads from gridfs import GridFSBucket from pymongo import MongoClient -from pymongo.monotonic import time -from test import client_context, host, port, unittest NUM_ITERATIONS = 100 MAX_ITERATION_TIME = 300 NUM_DOCS = 10000 -TEST_PATH = os.environ.get('TEST_PATH', os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('data'))) +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) -OUTPUT_FILE = os.environ.get('OUTPUT_FILE') +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") + +result_data: List = [] -result_data = [] def tearDownModule(): - output = json.dumps({ - 'results': result_data - }, indent=4) + output = json.dumps(result_data, indent=4) if OUTPUT_FILE: - with open(OUTPUT_FILE, 'w') as opf: + with open(OUTPUT_FILE, "w") as opf: opf.write(output) else: print(output) -class Timer(object): +class Timer: def __enter__(self): - self.start = time() + self.start = time.monotonic() return self def __exit__(self, *args): - self.end = time() + self.end = time.monotonic() self.interval = self.end - self.start -class PerformanceTest(object): +class PerformanceTest: + dataset: Any + data_size: Any + do_task: Any + fail: Any @classmethod def setUpClass(cls): @@ -79,17 +85,21 @@ def setUp(self): def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) - result = self.data_size / median - print('Running %s. MEDIAN=%s' % (self.__class__.__name__, - self.percentile(50))) - result_data.append({ - 'name': name, - 'results': { - '1': { - 'ops_per_sec': result - } + bytes_per_sec = self.data_size / median + print(f"Running {self.__class__.__name__}. MEDIAN={self.percentile(50)}") + result_data.append( + { + "info": { + "test_name": name, + "args": { + "threads": 1, + }, + }, + "metrics": [ + {"name": "bytes_per_sec", "value": bytes_per_sec}, + ], } - }) + ) def before(self): pass @@ -98,20 +108,21 @@ def after(self): pass def percentile(self, percentile): - if hasattr(self, 'results'): + if hasattr(self, "results"): sorted_results = sorted(self.results) percentile_index = int(len(sorted_results) * percentile / 100) - 1 return sorted_results[percentile_index] else: - self.fail('Test execution failed') + self.fail("Test execution failed") + return None def runTest(self): results = [] - start = time() + start = time.monotonic() self.max_iterations = NUM_ITERATIONS for i in range(NUM_ITERATIONS): - if time() - start > MAX_ITERATION_TIME: - warnings.warn('Test timed out, completed %s iterations.' % i) + if time.monotonic() - start > MAX_ITERATION_TIME: + warnings.warn("Test timed out, completed %s iterations." % i) break self.before() with Timer() as timer: @@ -126,9 +137,7 @@ def runTest(self): class BsonEncodingTest(PerformanceTest): def setUp(self): # Location of test data. 
- with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = loads(data.read()) def do_task(self): @@ -139,9 +148,7 @@ def do_task(self): class BsonDecodingTest(PerformanceTest): def setUp(self): # Location of test data. - with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = encode(json.loads(data.read())) def do_task(self): @@ -150,76 +157,77 @@ def do_task(self): class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestFullEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 class TestFullDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 # SINGLE-DOC BENCHMARKS class TestRunCommand(PerformanceTest, unittest.TestCase): data_size = 160000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def do_task(self): command = self.client.perftest.command for _ in range(NUM_DOCS): - command("ismaster") + command("ping") class TestDocument(PerformanceTest): def setUp(self): # Location of test data. 
with open( - os.path.join( - TEST_PATH, os.path.join( - 'single_and_multi_document', self.dataset)), 'r') as data: + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) + ) as data: self.document = json.loads(data.read()) self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def tearDown(self): - super(TestDocument, self).tearDown() - self.client.drop_database('perftest') + super().tearDown() + self.client.drop_database("perftest") def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def after(self): - self.client.perftest.drop_collection('corpus') + self.client.perftest.drop_collection("corpus") class TestFindOneByID(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' - super(TestFindOneByID, self).setUp() + self.dataset = "tweet.json" + super().setUp() documents = [self.document.copy() for _ in range(NUM_DOCS)] self.corpus = self.client.perftest.corpus @@ -229,7 +237,7 @@ def setUp(self): def do_task(self): find_one = self.corpus.find_one for _id in self.inserted_ids: - find_one({'_id': _id}) + find_one({"_id": _id}) def before(self): pass @@ -240,9 +248,10 @@ def after(self): class TestSmallDocInsertOne(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' - super(TestSmallDocInsertOne, self).setUp() + self.dataset = "small_doc.json" + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -254,9 +263,10 @@ def do_task(self): class TestLargeDocInsertOne(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' - super(TestLargeDocInsertOne, self).setUp() + self.dataset = "large_doc.json" + super().setUp() self.documents = [self.document.copy() for _ in range(10)] @@ -269,14 +279,13 @@ def do_task(self): # MULTI-DOC BENCHMARKS class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' - super(TestFindManyAndEmptyCursor, self).setUp() + self.dataset = "tweet.json" + super().setUp() for _ in range(10): - self.client.perftest.command( - 'insert', 'corpus', - documents=[self.document] * 1000) + self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) self.corpus = self.client.perftest.corpus def do_task(self): @@ -291,13 +300,14 @@ def after(self): class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' - super(TestSmallDocBulkInsert, self).setUp() + self.dataset = "small_doc.json" + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -305,13 +315,14 @@ def do_task(self): class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' - super(TestLargeDocBulkInsert, self).setUp() + self.dataset = "large_doc.json" + super().setUp() self.documents = [self.document.copy() for _ in range(10)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = 
self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -319,47 +330,48 @@ def do_task(self): class TestGridFsUpload(PerformanceTest, unittest.TestCase): data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) - with open(gridfs_path, 'rb') as data: + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: self.document = data.read() self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): - super(TestGridFsUpload, self).tearDown() - self.client.drop_database('perftest') + super().tearDown() + self.client.drop_database("perftest") def before(self): - self.bucket.upload_from_stream('init', b'x') + self.bucket.upload_from_stream("init", b"x") def do_task(self): - self.bucket.upload_from_stream('gridfstest', self.document) + self.bucket.upload_from_stream("gridfstest", self.document) class TestGridFsDownload(PerformanceTest, unittest.TestCase): data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) self.bucket = GridFSBucket(self.client.perftest) - with open(gridfs_path, 'rb') as gfile: - self.uploaded_id = self.bucket.upload_from_stream( - 'gridfstest', gfile) + with open(gridfs_path, "rb") as gfile: + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) def tearDown(self): - super(TestGridFsDownload, self).tearDown() - self.client.drop_database('perftest') + super().tearDown() + self.client.drop_database("perftest") def do_task(self): self.bucket.open_download_stream(self.uploaded_id).read() @@ -381,41 +393,46 @@ def mp_map(map_func, files): def insert_json_file(filename): - with open(filename, 'r') as data: + assert proc_client is not None + with open(filename) as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, 'r') as data: + with open(filename) as data: for line in data: doc = json.loads(line) - doc['file'] = filename + doc["file"] = filename documents.append(doc) + assert proc_client is not None coll = proc_client.perftest.corpus coll.insert_many(documents) def read_json_file(filename): + assert proc_client is not None coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile() + temp = tempfile.TemporaryFile(mode="w") try: temp.writelines( - [json.dumps(doc) + '\n' for - doc in coll.find({'file': filename}, {'_id': False})]) + [json.dumps(doc) + "\n" for doc in coll.find({"file": filename}, {"_id": False})] + ) finally: temp.close() def insert_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) - with open(filename, 'rb') as gfile: + with open(filename, "rb") as gfile: bucket.upload_from_stream(filename, gfile) def read_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) temp = tempfile.TemporaryFile() @@ -427,41 +444,39 @@ def read_gridfs_file(filename): class TestJsonMultiImport(PerformanceTest, unittest.TestCase): 
    data_size = 565000000
+
     def setUp(self):
         self.client = client_context.client
-        self.client.drop_database('perftest')
+        self.client.drop_database("perftest")
 
     def before(self):
-        self.client.perftest.command({'create': 'corpus'})
+        self.client.perftest.command({"create": "corpus"})
         self.corpus = self.client.perftest.corpus
 
-        ldjson_path = os.path.join(
-            TEST_PATH, os.path.join('parallel', 'ldjson_multi'))
-        self.files = [os.path.join(
-            ldjson_path, s) for s in os.listdir(ldjson_path)]
+        ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi"))
+        self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)]
 
     def do_task(self):
         mp_map(insert_json_file, self.files)
 
     def after(self):
-        self.client.perftest.drop_collection('corpus')
+        self.client.perftest.drop_collection("corpus")
 
     def tearDown(self):
-        super(TestJsonMultiImport, self).tearDown()
-        self.client.drop_database('perftest')
+        super().tearDown()
+        self.client.drop_database("perftest")
 
 
 class TestJsonMultiExport(PerformanceTest, unittest.TestCase):
     data_size = 565000000
+
     def setUp(self):
         self.client = client_context.client
-        self.client.drop_database('perftest')
-        self.client.perfest.corpus.create_index('file')
+        self.client.drop_database("perftest")
+        self.client.perftest.corpus.create_index("file")
 
-        ldjson_path = os.path.join(
-            TEST_PATH, os.path.join('parallel', 'ldjson_multi'))
-        self.files = [os.path.join(
-            ldjson_path, s) for s in os.listdir(ldjson_path)]
+        ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi"))
+        self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)]
 
         mp_map(insert_json_file_with_file_id, self.files)
 
@@ -469,57 +484,55 @@ def do_task(self):
         mp_map(read_json_file, self.files)
 
     def tearDown(self):
-        super(TestJsonMultiExport, self).tearDown()
-        self.client.drop_database('perftest')
+        super().tearDown()
+        self.client.drop_database("perftest")
 
 
 class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase):
     data_size = 262144000
+
     def setUp(self):
         self.client = client_context.client
-        self.client.drop_database('perftest')
+        self.client.drop_database("perftest")
 
     def before(self):
-        self.client.perftest.drop_collection('fs.files')
-        self.client.perftest.drop_collection('fs.chunks')
+        self.client.perftest.drop_collection("fs.files")
+        self.client.perftest.drop_collection("fs.chunks")
         self.bucket = GridFSBucket(self.client.perftest)
 
-        gridfs_path = os.path.join(
-            TEST_PATH, os.path.join('parallel', 'gridfs_multi'))
-        self.files = [os.path.join(
-            gridfs_path, s) for s in os.listdir(gridfs_path)]
+        gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi"))
+        self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)]
 
     def do_task(self):
         mp_map(insert_gridfs_file, self.files)
 
     def tearDown(self):
-        super(TestGridFsMultiFileUpload, self).tearDown()
-        self.client.drop_database('perftest')
+        super().tearDown()
+        self.client.drop_database("perftest")
 
 
 class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase):
     data_size = 262144000
+
     def setUp(self):
         self.client = client_context.client
-        self.client.drop_database('perftest')
+        self.client.drop_database("perftest")
 
         bucket = GridFSBucket(self.client.perftest)
 
-        gridfs_path = os.path.join(
-            TEST_PATH, os.path.join('parallel', 'gridfs_multi'))
-        self.files = [os.path.join(
-            gridfs_path, s) for s in os.listdir(gridfs_path)]
+        gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi"))
+        self.files = [os.path.join(gridfs_path, s) 
for s in os.listdir(gridfs_path)] for fname in self.files: - with open(fname, 'rb') as gfile: + with open(fname, "rb") as gfile: bucket.upload_from_stream(fname, gfile) def do_task(self): mp_map(read_gridfs_file, self.files) def tearDown(self): - super(TestGridFsMultiFileDownload, self).tearDown() - self.client.drop_database('perftest') + super().tearDown() + self.client.drop_database("perftest") if __name__ == "__main__": diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 7ed2e8a303..2b291c7bd3 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -13,21 +13,20 @@ # limitations under the License. """Tools for mocking parts of PyMongo to test other parts.""" +from __future__ import annotations import contextlib -from functools import partial import weakref +from functools import partial +from test import client_context -from pymongo import common -from pymongo import MongoClient +from pymongo import MongoClient, common from pymongo.errors import AutoReconnect, NetworkTimeout -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import Pool from pymongo.server_description import ServerDescription -from test import client_context - class MockPool(Pool): def __init__(self, client, pair, *args, **kwargs): @@ -40,56 +39,75 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False): + def checkout(self, handler=None): client = self.client - host_and_port = '%s:%s' % (self.mock_host, self.mock_port) + host_and_port = f"{self.mock_host}:{self.mock_port}" if host_and_port in client.mock_down_hosts: - raise AutoReconnect('mock error') + raise AutoReconnect("mock error") assert host_and_port in ( - client.mock_standalones - + client.mock_members - + client.mock_mongoses), "bad host: %s" % host_and_port + client.mock_standalones + client.mock_members + client.mock_mongoses + ), ("bad host: %s" % host_and_port) + + with Pool.checkout(self, handler) as conn: + conn.mock_host = self.mock_host + conn.mock_port = self.mock_port + yield conn - with Pool.get_socket(self, all_credentials) as sock_info: - sock_info.mock_host = self.mock_host - sock_info.mock_port = self.mock_port - yield sock_info + +class DummyMonitor: + def __init__(self, server_description, topology, pool, topology_settings): + self._server_description = server_description + self.opened = False + + def cancel_check(self): + pass + + def join(self): + pass + + def open(self): + self.opened = True + + def request_check(self): + pass + + def close(self): + self.opened = False class MockMonitor(Monitor): - def __init__( - self, - client, - server_description, - topology, - pool, - topology_settings): - # MockMonitor gets a 'client' arg, regular monitors don't. - self.client = client - Monitor.__init__( - self, - server_description, - topology, - pool, - topology_settings) - - def _check_once(self, metadata=None, cluster_time=None): + def __init__(self, client, server_description, topology, pool, topology_settings): + # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it + # to avoid cycles. 
+ self.client = weakref.proxy(client) + Monitor.__init__(self, server_description, topology, pool, topology_settings) + + def _check_once(self): + client = self.client address = self._server_description.address - response, rtt = self.client.mock_is_master('%s:%d' % address) - return ServerDescription(address, IsMaster(response), rtt) + response, rtt = client.mock_hello("%s:%d" % address) # type: ignore[str-format] + return ServerDescription(address, Hello(response), rtt) class MockClient(MongoClient): def __init__( - self, standalones, members, mongoses, ismaster_hosts=None, - *args, **kwargs): + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): """A MongoClient connected to the default server, with a mock topology. - standalones, members, mongoses determine the configuration of the - topology. They are formatted like ['a:1', 'b:2']. ismaster_hosts - provides an alternative host list for the server's mocked ismaster - response; see test_connect_with_internal_ips. + standalones, members, mongoses, arbiters, and down_hosts determine the + configuration of the topology. They are formatted like ['a:1', 'b:2']. + hello_hosts provides an alternative host list for the server's + mocked hello response; see test_connect_with_internal_ips. """ self.mock_standalones = standalones[:] self.mock_members = members[:] @@ -99,15 +117,18 @@ def __init__( else: self.mock_primary = None - if ismaster_hosts is not None: - self.mock_ismaster_hosts = ismaster_hosts + # Hosts that should be considered an arbiter. + self.mock_arbiters = arbiters[:] if arbiters else [] + + if hello_hosts is not None: + self.mock_hello_hosts = hello_hosts else: - self.mock_ismaster_hosts = members[:] + self.mock_hello_hosts = members[:] self.mock_mongoses = mongoses[:] # Hosts that should raise socket errors. - self.mock_down_hosts = [] + self.mock_down_hosts = down_hosts[:] if down_hosts else [] # Hostname -> (min wire version, max wire version) self.mock_wire_versions = {} @@ -118,13 +139,13 @@ def __init__( # Hostname -> round trip time self.mock_rtts = {} - kwargs['_pool_class'] = partial(MockPool, self) - kwargs['_monitor_class'] = partial(MockMonitor, self) + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(MockMonitor, self) client_options = client_context.default_client_options.copy() client_options.update(kwargs) - super(MockClient, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) def kill_host(self, host): """Host is like 'a:1'.""" @@ -140,8 +161,8 @@ def set_wire_version_range(self, host, min_version, max_version): def set_max_write_batch_size(self, host, size): self.mock_max_write_batch_sizes[host] = size - def mock_is_master(self, host): - """Return mock ismaster response (a dict) and round trip time.""" + def mock_hello(self, host): + """Return mock hello response (a dict) and round trip time.""" if host in self.mock_wire_versions: min_wire_version, max_wire_version = self.mock_wire_versions[host] else: @@ -149,49 +170,57 @@ def mock_is_master(self, host): max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION max_write_batch_size = self.mock_max_write_batch_sizes.get( - host, common.MAX_WRITE_BATCH_SIZE) + host, common.MAX_WRITE_BATCH_SIZE + ) rtt = self.mock_rtts.get(host, 0) # host is like 'a:1'. 
if host in self.mock_down_hosts: - raise NetworkTimeout('mock timeout') + raise NetworkTimeout("mock timeout") elif host in self.mock_standalones: response = { - 'ok': 1, - 'ismaster': True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } elif host in self.mock_members: - ismaster = (host == self.mock_primary) + primary = host == self.mock_primary # Simulate a replica set member. response = { - 'ok': 1, - 'ismaster': ismaster, - 'secondary': not ismaster, - 'setName': 'rs', - 'hosts': self.mock_ismaster_hosts, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "ok": 1, + HelloCompat.LEGACY_CMD: primary, + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } if self.mock_primary: - response['primary'] = self.mock_primary + response["primary"] = self.mock_primary + + if host in self.mock_arbiters: + response["arbiterOnly"] = True + response["secondary"] = False elif host in self.mock_mongoses: response = { - 'ok': 1, - 'ismaster': True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'msg': 'isdbgrid', - 'maxWriteBatchSize': max_write_batch_size} + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "msg": "isdbgrid", + "maxWriteBatchSize": max_write_batch_size, + } else: # In test_internal_ips(), we try to connect to a host listed - # in ismaster['hosts'] but not publicly accessible. - raise AutoReconnect('Unknown host: %s' % host) + # in hello['hosts'] but not publicly accessible. + raise AutoReconnect("Unknown host: %s" % host) return response, rtt diff --git a/test/qcheck.py b/test/qcheck.py index 4d039f75b5..739d4948ec 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -11,23 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations -import random -import traceback import datetime +import random import re import sys +import traceback + sys.path[0:0] = [""] -from bson.binary import Binary from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import MAXSIZE, PY3, iteritems from bson.son import SON -if PY3: - unichr = chr - gen_target = 100 reduction_attempts = 10 examples = 5 @@ -59,7 +56,7 @@ def gen_int(): def gen_float(): - return lambda: (random.random() - 0.5) * MAXSIZE + return lambda: (random.random() - 0.5) * sys.maxsize def gen_boolean(): @@ -74,12 +71,8 @@ def gen_printable_string(gen_length): return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) -if PY3: - def gen_char(set=None): - return lambda: bytes([random.randint(0, 255)]) -else: - def gen_char(set=None): - return lambda: chr(random.randint(0, 255)) +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) def gen_string(gen_length): @@ -87,13 +80,11 @@ def gen_string(gen_length): def gen_unichar(): - return lambda: unichr(random.randint(1, 0xFFF)) + return lambda: chr(random.randint(1, 0xFFF)) def gen_unicode(gen_length): - return lambda: u"".join([x for x in - gen_list(gen_unichar(), gen_length)() if - x not in ".$"]) + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) def gen_list(generator, gen_length): @@ -101,22 +92,24 @@ def gen_list(generator, gen_length): def gen_datetime(): - return lambda: datetime.datetime(random.randint(1970, 2037), - random.randint(1, 12), - random.randint(1, 28), - random.randint(0, 23), - random.randint(0, 59), - random.randint(0, 59), - random.randint(0, 999) * 1000) + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) def gen_dict(gen_key, gen_value, gen_length): - def a_dict(gen_key, gen_value, length): result = {} for _ in range(length): result[gen_key()] = gen_value() return result + return lambda: a_dict(gen_key, gen_value, gen_length()) @@ -124,7 +117,8 @@ def gen_regexp(gen_length): # TODO our patterns only consist of one letter. # this is because of a bug in CPython's regex equality testing, # which I haven't quite tracked down, so I'm just ignoring it... - pattern = lambda: u"".join(gen_list(choose_lifted(u"a"), gen_length)()) + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 @@ -136,6 +130,7 @@ def gen_flags(): flags = flags | re.VERBOSE return flags + return lambda: re.compile(pattern(), gen_flags()) @@ -150,21 +145,17 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - bintype = Binary - if PY3: - # If we used Binary in python3 tests would fail since we - # decode BSON binary subtype 0 to bytes. Testing this with - # bytes in python3 makes a lot more sense. 
- bintype = bytes - choices = [gen_unicode(gen_range(0, 50)), - gen_printable_string(gen_range(0, 50)), - my_map(gen_string(gen_range(0, 1000)), bintype), - gen_int(), - gen_float(), - gen_boolean(), - gen_datetime(), - gen_objectid(), - lift(None)] + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] if ref: choices.append(gen_dbref()) if depth > 0: @@ -178,9 +169,10 @@ def gen_mongo_list(depth, ref): def gen_mongo_dict(depth, ref=True): - return my_map(gen_dict(gen_unicode(gen_range(0, 20)), - gen_mongo_value(depth - 1, ref), - gen_range(0, 10)), SON) + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) def simplify(case): # TODO this is a hack @@ -195,7 +187,7 @@ def simplify(case): # TODO this is a hack return (True, simplified) else: # simplify a value - simplified_items = list(iteritems(simplified)) + simplified_items = list(simplified.items()) if not len(simplified_items): return (False, case) (key, value) = random.choice(simplified_items) @@ -240,9 +232,9 @@ def check(predicate, generator): try: if not predicate(case): reduction = reduce(case, predicate) - counter_examples.append("after %s reductions: %r" % reduction) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) except: - counter_examples.append("%r : %s" % (case, traceback.format_exc())) + counter_examples.append(f"{case!r} : {traceback.format_exc()}") return counter_examples @@ -250,8 +242,10 @@ def check_unittest(test, predicate, generator): counter_examples = check(predicate, generator) if counter_examples: failures = len(counter_examples) - message = "\n".join([" -> %s" % f for f in - counter_examples[:examples]]) - message = ("found %d counter examples, displaying first %d:\n%s" % - (failures, min(failures, examples), message)) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) test.fail(message) diff --git a/test/read_write_concern/connection-string/read-concern.json b/test/read_write_concern/connection-string/read-concern.json index dd2b792b29..1ecad8c268 100644 --- a/test/read_write_concern/connection-string/read-concern.json +++ b/test/read_write_concern/connection-string/read-concern.json @@ -24,6 +24,24 @@ "readConcern": { "level": "majority" } + }, + { + "description": "linearizable specified", + "uri": "mongodb://localhost/?readConcernLevel=linearizable", + "valid": true, + "warning": false, + "readConcern": { + "level": "linearizable" + } + }, + { + "description": "available specified", + "uri": "mongodb://localhost/?readConcernLevel=available", + "valid": true, + "warning": false, + "readConcern": { + "level": "available" + } } ] } diff --git a/test/read_write_concern/document/read-concern.json b/test/read_write_concern/document/read-concern.json index ef2bafdf55..187397dae5 100644 --- a/test/read_write_concern/document/read-concern.json +++ b/test/read_write_concern/document/read-concern.json @@ -28,6 +28,39 @@ "level": "local" }, "isServerDefault": false + }, + { + "description": "Linearizable", + "valid": true, + "readConcern": { + "level": "linearizable" + }, + "readConcernDocument": { + "level": "linearizable" + }, + "isServerDefault": false + }, + { + "description": "Snapshot", + "valid": 
true, + "readConcern": { + "level": "snapshot" + }, + "readConcernDocument": { + "level": "snapshot" + }, + "isServerDefault": false + }, + { + "description": "Available", + "valid": true, + "readConcern": { + "level": "available" + }, + "readConcernDocument": { + "level": "available" + }, + "isServerDefault": false } ] } diff --git a/test/read_write_concern/operation/default-write-concern-2.6.json b/test/read_write_concern/operation/default-write-concern-2.6.json new file mode 100644 index 0000000000..c623298cd7 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-2.6.json @@ -0,0 +1,544 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "2.6" + } + ], + "tests": [ + { + "description": "DeleteOne omits default write concern", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "DeleteMany omits default write concern", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "BulkWrite with all models omits default write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "ordered": true, + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": {} + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 3 + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 3 + } + } + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 1 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + 
"writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 2 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 2 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 3 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "InsertOne and InsertMany omit default write concern", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "insertMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "UpdateOne, UpdateMany, and ReplaceOne omit default write concern", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 3 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 3 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "x": 3 + } + } + 
], + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.2.json b/test/read_write_concern/operation/default-write-concern-3.2.json new file mode 100644 index 0000000000..04dd231f04 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.2.json @@ -0,0 +1,125 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.2" + } + ], + "tests": [ + { + "description": "findAndModify operations omit default write concern", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "update": { + "x": 2 + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "remove": true, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.4.json b/test/read_write_concern/operation/default-write-concern-3.4.json new file mode 100644 index 0000000000..6519f6f089 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.4.json @@ -0,0 +1,216 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.4" + } + ], + "tests": [ + { + "description": "Aggregate with $out omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "RunCommand with a write command omits default write concern (runCommand should never inherit write concern)", + "operations": [ + { + "object": "database", + "databaseOptions": { + "writeConcern": {} + }, + "name": "runCommand", + 
"command_name": "delete", + "arguments": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ] + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "CreateIndex and dropIndex omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "createIndex", + "arguments": { + "keys": { + "x": 1 + } + } + }, + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "dropIndex", + "arguments": { + "name": "x_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "createIndexes": "default_write_concern_coll", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "dropIndexes": "default_write_concern_coll", + "index": "x_1", + "writeConcern": null + } + } + } + ] + }, + { + "description": "MapReduce omits default write concern", + "operations": [ + { + "name": "mapReduce", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "mapReduce": "default_write_concern_coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + }, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-4.2.json b/test/read_write_concern/operation/default-write-concern-4.2.json new file mode 100644 index 0000000000..fef192d1a3 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-4.2.json @@ -0,0 +1,87 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "4.2" + } + ], + "tests": [ + { + "description": "Aggregate with $merge omits default write concern", + "operations": [ + { + "object": "collection", + "databaseOptions": { + "writeConcern": {} + }, + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ], + "writeConcern": null + } + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_reads/legacy/aggregate-merge.json b/test/retryable_reads/legacy/aggregate-merge.json new file mode 100644 index 0000000000..b401d741ba --- /dev/null +++ b/test/retryable_reads/legacy/aggregate-merge.json 
@@ -0,0 +1,98 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.11" + } + ], + "database_name": "retryable-reads-tests", + "collection_name": "coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "tests": [ + { + "description": "Aggregate with $merge does not retry", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "object": "collection", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "command_name": "aggregate", + "database_name": "retryable-reads-tests" + } + } + ] + } + ] +} diff --git a/test/retryable_reads/aggregate-serverErrors.json b/test/retryable_reads/legacy/aggregate-serverErrors.json similarity index 98% rename from test/retryable_reads/aggregate-serverErrors.json rename to test/retryable_reads/legacy/aggregate-serverErrors.json index 04208bc95b..1155f808dc 100644 --- a/test/retryable_reads/aggregate-serverErrors.json +++ b/test/retryable_reads/legacy/aggregate-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -218,7 +219,7 @@ ] }, { - "description": "Aggregate succeeds after NotMaster", + "description": "Aggregate succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -311,7 +312,7 @@ ] }, { - "description": "Aggregate succeeds after NotMasterNoSlaveOk", + "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -404,7 +405,7 @@ ] }, { - "description": "Aggregate succeeds after NotMasterOrSecondary", + "description": "Aggregate succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1055,7 +1056,7 @@ ] }, { - "description": "Aggregate fails after two NotMaster errors", + "description": "Aggregate fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1139,7 +1140,7 @@ ] }, { - "description": "Aggregate fails after NotMaster when retryReads is false", + "description": "Aggregate fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/aggregate.json b/test/retryable_reads/legacy/aggregate.json similarity index 99% rename from test/retryable_reads/aggregate.json rename to test/retryable_reads/legacy/aggregate.json index 30a6e05e69..f23d5c6793 100644 --- a/test/retryable_reads/aggregate.json +++ b/test/retryable_reads/legacy/aggregate.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json similarity index 97% rename from test/retryable_reads/changeStreams-client.watch-serverErrors.json rename to 
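The aggregate-merge.json file added above pins down an edge case of retryable reads: an aggregate whose pipeline ends in $merge (or $out) performs a write, so it must not be retried even though aggregate is otherwise a retryable read. A sketch of the check a driver might perform before marking the operation retryable (illustrative, not pymongo's actual code):

    def is_retryable_aggregate(pipeline):
        # $out/$merge turn the aggregation into a write; per the
        # "Aggregate with $merge does not retry" test, such pipelines
        # must be excluded from read retries.
        if not pipeline:
            return True
        last_stage = pipeline[-1]
        return "$out" not in last_stage and "$merge" not in last_stage


    assert is_retryable_aggregate([{"$match": {"_id": {"$gt": 1}}}])
    assert not is_retryable_aggregate(
        [{"$sort": {"x": 1}}, {"$merge": {"into": "output-collection"}}]
    )

Hence the single command_started_event in that test's expectations: the failed aggregate surfaces as an error with no second attempt.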
test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json index cf6c230ec8..73dbfee916 100644 --- a/test/retryable_reads/changeStreams-client.watch-serverErrors.json +++ b/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -141,7 +143,7 @@ ] }, { - "description": "client.watch succeeds after NotMaster", + "description": "client.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -196,7 +198,7 @@ ] }, { - "description": "client.watch succeeds after NotMasterNoSlaveOk", + "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -251,7 +253,7 @@ ] }, { - "description": "client.watch succeeds after NotMasterOrSecondary", + "description": "client.watch succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -636,7 +638,7 @@ ] }, { - "description": "client.watch fails after two NotMaster errors", + "description": "client.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -692,7 +694,7 @@ ] }, { - "description": "client.watch fails after NotMaster when retryReads is false", + "description": "client.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-client.watch.json b/test/retryable_reads/legacy/changeStreams-client.watch.json similarity index 98% rename from test/retryable_reads/changeStreams-client.watch.json rename to test/retryable_reads/legacy/changeStreams-client.watch.json index 9a2ccad095..30a53037ad 100644 --- a/test/retryable_reads/changeStreams-client.watch.json +++ b/test/retryable_reads/legacy/changeStreams-client.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json similarity index 96% rename from test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json index eb7df1e264..77b3af04f4 100644 --- a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json +++ b/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -133,7 +135,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMaster", + "description": "db.coll.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -184,7 +186,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMasterNoSlaveOk", + "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +237,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMasterOrSecondary", + "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", 
"failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -592,7 +594,7 @@ ] }, { - "description": "db.coll.watch fails after two NotMaster errors", + "description": "db.coll.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -644,7 +646,7 @@ ] }, { - "description": "db.coll.watch fails after NotMaster when retryReads is false", + "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-db.coll.watch.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch.json similarity index 98% rename from test/retryable_reads/changeStreams-db.coll.watch.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch.json index 3408c84236..27f6105a4b 100644 --- a/test/retryable_reads/changeStreams-db.coll.watch.json +++ b/test/retryable_reads/legacy/changeStreams-db.coll.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json similarity index 97% rename from test/retryable_reads/changeStreams-db.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json index e070f56a01..7a87534508 100644 --- a/test/retryable_reads/changeStreams-db.watch-serverErrors.json +++ b/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -133,7 +135,7 @@ ] }, { - "description": "db.watch succeeds after NotMaster", + "description": "db.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -184,7 +186,7 @@ ] }, { - "description": "db.watch succeeds after NotMasterNoSlaveOk", + "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +237,7 @@ ] }, { - "description": "db.watch succeeds after NotMasterOrSecondary", + "description": "db.watch succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -592,7 +594,7 @@ ] }, { - "description": "db.watch fails after two NotMaster errors", + "description": "db.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -644,7 +646,7 @@ ] }, { - "description": "db.watch fails after NotMaster when retryReads is false", + "description": "db.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-db.watch.json b/test/retryable_reads/legacy/changeStreams-db.watch.json similarity index 98% rename from test/retryable_reads/changeStreams-db.watch.json rename to test/retryable_reads/legacy/changeStreams-db.watch.json index bec09c49b7..e6b0b9b781 100644 --- a/test/retryable_reads/changeStreams-db.watch.json +++ b/test/retryable_reads/legacy/changeStreams-db.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": 
"retryable-reads-tests", diff --git a/test/retryable_reads/count-serverErrors.json b/test/retryable_reads/legacy/count-serverErrors.json similarity index 96% rename from test/retryable_reads/count-serverErrors.json rename to test/retryable_reads/legacy/count-serverErrors.json index 839680fe59..36a0c17cab 100644 --- a/test/retryable_reads/count-serverErrors.json +++ b/test/retryable_reads/legacy/count-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -114,7 +115,7 @@ ] }, { - "description": "Count succeeds after NotMaster", + "description": "Count succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -157,7 +158,7 @@ ] }, { - "description": "Count succeeds after NotMasterNoSlaveOk", + "description": "Count succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -200,7 +201,7 @@ ] }, { - "description": "Count succeeds after NotMasterOrSecondary", + "description": "Count succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -501,7 +502,7 @@ ] }, { - "description": "Count fails after two NotMaster errors", + "description": "Count fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -544,7 +545,7 @@ ] }, { - "description": "Count fails after NotMaster when retryReads is false", + "description": "Count fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/count.json b/test/retryable_reads/legacy/count.json similarity index 98% rename from test/retryable_reads/count.json rename to test/retryable_reads/legacy/count.json index 0ccf4982ba..139a545131 100644 --- a/test/retryable_reads/count.json +++ b/test/retryable_reads/legacy/count.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/countDocuments-serverErrors.json b/test/retryable_reads/legacy/countDocuments-serverErrors.json similarity index 97% rename from test/retryable_reads/countDocuments-serverErrors.json rename to test/retryable_reads/legacy/countDocuments-serverErrors.json index f45eadfa0c..782ea5e4f1 100644 --- a/test/retryable_reads/countDocuments-serverErrors.json +++ b/test/retryable_reads/legacy/countDocuments-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -166,7 +167,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMaster", + "description": "CountDocuments succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +236,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMasterNoSlaveOk", + "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -304,7 +305,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMasterOrSecondary", + "description": "CountDocuments succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -787,7 +788,7 @@ ] }, { - "description": "CountDocuments fails after two NotMaster errors", + "description": "CountDocuments fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -856,7 +857,7 @@ ] }, { - 
"description": "CountDocuments fails after NotMaster when retryReads is false", + "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/countDocuments.json b/test/retryable_reads/legacy/countDocuments.json similarity index 99% rename from test/retryable_reads/countDocuments.json rename to test/retryable_reads/legacy/countDocuments.json index b4ccf36684..57a64e45b7 100644 --- a/test/retryable_reads/countDocuments.json +++ b/test/retryable_reads/legacy/countDocuments.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/distinct-serverErrors.json b/test/retryable_reads/legacy/distinct-serverErrors.json similarity index 97% rename from test/retryable_reads/distinct-serverErrors.json rename to test/retryable_reads/legacy/distinct-serverErrors.json index 50fd6a5505..d7c6018a62 100644 --- a/test/retryable_reads/distinct-serverErrors.json +++ b/test/retryable_reads/legacy/distinct-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -158,7 +159,7 @@ ] }, { - "description": "Distinct succeeds after NotMaster", + "description": "Distinct succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -221,7 +222,7 @@ ] }, { - "description": "Distinct succeeds after NotMasterNoSlaveOk", + "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -284,7 +285,7 @@ ] }, { - "description": "Distinct succeeds after NotMasterOrSecondary", + "description": "Distinct succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -725,7 +726,7 @@ ] }, { - "description": "Distinct fails after two NotMaster errors", + "description": "Distinct fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -785,7 +786,7 @@ ] }, { - "description": "Distinct fails after NotMaster when retryReads is false", + "description": "Distinct fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/distinct.json b/test/retryable_reads/legacy/distinct.json similarity index 99% rename from test/retryable_reads/distinct.json rename to test/retryable_reads/legacy/distinct.json index b5885e27eb..1fd415da81 100644 --- a/test/retryable_reads/distinct.json +++ b/test/retryable_reads/legacy/distinct.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json similarity index 96% rename from test/retryable_reads/estimatedDocumentCount-serverErrors.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json index 1af21d1fe9..6bb128f5f3 100644 --- a/test/retryable_reads/estimatedDocumentCount-serverErrors.json +++ b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json @@ -108,7 +108,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMaster", + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -148,7 +148,7 @@ ] }, { - "description": 
"EstimatedDocumentCount succeeds after NotMasterNoSlaveOk", + "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -188,7 +188,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMasterOrSecondary", + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -468,7 +468,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after two NotMaster errors", + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -508,7 +508,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after NotMaster when retryReads is false", + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/estimatedDocumentCount.json b/test/retryable_reads/legacy/estimatedDocumentCount.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount.json rename to test/retryable_reads/legacy/estimatedDocumentCount.json diff --git a/test/retryable_reads/find-serverErrors.json b/test/retryable_reads/legacy/find-serverErrors.json similarity index 98% rename from test/retryable_reads/find-serverErrors.json rename to test/retryable_reads/legacy/find-serverErrors.json index 44ecf34d2f..f6b96c6dcb 100644 --- a/test/retryable_reads/find-serverErrors.json +++ b/test/retryable_reads/legacy/find-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -188,7 +189,7 @@ ] }, { - "description": "Find succeeds after NotMaster", + "description": "Find succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -262,7 +263,7 @@ ] }, { - "description": "Find succeeds after NotMasterNoSlaveOk", + "description": "Find succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -336,7 +337,7 @@ ] }, { - "description": "Find succeeds after NotMasterOrSecondary", + "description": "Find succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -854,7 +855,7 @@ ] }, { - "description": "Find fails after two NotMaster errors", + "description": "Find fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -911,7 +912,7 @@ ] }, { - "description": "Find fails after NotMaster when retryReads is false", + "description": "Find fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/find.json b/test/retryable_reads/legacy/find.json similarity index 99% rename from test/retryable_reads/find.json rename to test/retryable_reads/legacy/find.json index 56479ff1d8..00d419c0da 100644 --- a/test/retryable_reads/find.json +++ b/test/retryable_reads/legacy/find.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/findOne-serverErrors.json b/test/retryable_reads/legacy/findOne-serverErrors.json similarity index 97% rename from test/retryable_reads/findOne-serverErrors.json rename to test/retryable_reads/legacy/findOne-serverErrors.json index b8229483d2..d039ef247e 100644 --- 
a/test/retryable_reads/findOne-serverErrors.json +++ b/test/retryable_reads/legacy/findOne-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -148,7 +149,7 @@ ] }, { - "description": "FindOne succeeds after NotMaster", + "description": "FindOne succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -202,7 +203,7 @@ ] }, { - "description": "FindOne succeeds after NotMasterNoSlaveOk", + "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -256,7 +257,7 @@ ] }, { - "description": "FindOne succeeds after NotMasterOrSecondary", + "description": "FindOne succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -634,7 +635,7 @@ ] }, { - "description": "FindOne fails after two NotMaster errors", + "description": "FindOne fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -685,7 +686,7 @@ ] }, { - "description": "FindOne fails after NotMaster when retryReads is false", + "description": "FindOne fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/findOne.json b/test/retryable_reads/legacy/findOne.json similarity index 99% rename from test/retryable_reads/findOne.json rename to test/retryable_reads/legacy/findOne.json index d296a9cdb5..b9deb73d2a 100644 --- a/test/retryable_reads/findOne.json +++ b/test/retryable_reads/legacy/findOne.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/gridfs-download-serverErrors.json b/test/retryable_reads/legacy/gridfs-download-serverErrors.json similarity index 98% rename from test/retryable_reads/gridfs-download-serverErrors.json rename to test/retryable_reads/legacy/gridfs-download-serverErrors.json index 84e50e370c..cec3a5016a 100644 --- a/test/retryable_reads/gridfs-download-serverErrors.json +++ b/test/retryable_reads/legacy/gridfs-download-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -191,7 +192,7 @@ ] }, { - "description": "Download succeeds after NotMaster", + "description": "Download succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -261,7 +262,7 @@ ] }, { - "description": "Download succeeds after NotMasterNoSlaveOk", + "description": "Download succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -331,7 +332,7 @@ ] }, { - "description": "Download succeeds after NotMasterOrSecondary", + "description": "Download succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -821,7 +822,7 @@ ] }, { - "description": "Download fails after two NotMaster errors", + "description": "Download fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -876,7 +877,7 @@ ] }, { - "description": "Download fails after NotMaster when retryReads is false", + "description": "Download fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/gridfs-download.json b/test/retryable_reads/legacy/gridfs-download.json similarity index 99% 
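The gridfs-download files renamed here lean on the fact that a GridFS download is nothing more than a sequence of ordinary queries, each independently retryable; the serverErrors variants fail a single find and expect that one command, not the whole download, to be retried. A rough sketch of the download's shape, with the files/chunks collections passed in explicitly because the real bucket internals are not shown in this diff:

    def download_to_stream(files_coll, chunks_coll, file_id, dest):
        # One retryable find on fs.files for the metadata...
        file_doc = files_coll.find_one({"_id": file_id})
        if file_doc is None:
            raise FileNotFoundError(file_id)
        # ...then retryable finds over fs.chunks in chunk order ("n").
        for chunk in chunks_coll.find({"files_id": file_id}).sort("n", 1):
            dest.write(chunk["data"])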
rename from test/retryable_reads/gridfs-download.json rename to test/retryable_reads/legacy/gridfs-download.json index a5c5ef4d55..4d0d5a17e4 100644 --- a/test/retryable_reads/gridfs-download.json +++ b/test/retryable_reads/legacy/gridfs-download.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json similarity index 97% rename from test/retryable_reads/gridfs-downloadByName-serverErrors.json rename to test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json index de439ce4b2..a64230d38a 100644 --- a/test/retryable_reads/gridfs-downloadByName-serverErrors.json +++ b/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -179,7 +180,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMaster", + "description": "DownloadByName succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -243,7 +244,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMasterNoSlaveOk", + "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -307,7 +308,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMasterOrSecondary", + "description": "DownloadByName succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -755,7 +756,7 @@ ] }, { - "description": "DownloadByName fails after two NotMaster errors", + "description": "DownloadByName fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -804,7 +805,7 @@ ] }, { - "description": "DownloadByName fails after NotMaster when retryReads is false", + "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/gridfs-downloadByName.json b/test/retryable_reads/legacy/gridfs-downloadByName.json similarity index 99% rename from test/retryable_reads/gridfs-downloadByName.json rename to test/retryable_reads/legacy/gridfs-downloadByName.json index 0634a09bff..48f2168cfc 100644 --- a/test/retryable_reads/gridfs-downloadByName.json +++ b/test/retryable_reads/legacy/gridfs-downloadByName.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollectionNames-serverErrors.json b/test/retryable_reads/legacy/listCollectionNames-serverErrors.json similarity index 95% rename from test/retryable_reads/listCollectionNames-serverErrors.json rename to test/retryable_reads/legacy/listCollectionNames-serverErrors.json index 27c13d6301..bbdce625ad 100644 --- a/test/retryable_reads/listCollectionNames-serverErrors.json +++ b/test/retryable_reads/legacy/listCollectionNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollectionNames succeeds after NotMaster", + "description": "ListCollectionNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollectionNames succeeds 
after NotMasterNoSlaveOk", + "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollectionNames succeeds after NotMasterOrSecondary", + "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollectionNames fails after two NotMaster errors", + "description": "ListCollectionNames fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollectionNames fails after NotMaster when retryReads is false", + "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollectionNames.json b/test/retryable_reads/legacy/listCollectionNames.json similarity index 98% rename from test/retryable_reads/listCollectionNames.json rename to test/retryable_reads/legacy/listCollectionNames.json index 437fc36a40..73d96a3cf7 100644 --- a/test/retryable_reads/listCollectionNames.json +++ b/test/retryable_reads/legacy/listCollectionNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollectionObjects-serverErrors.json b/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json similarity index 95% rename from test/retryable_reads/listCollectionObjects-serverErrors.json rename to test/retryable_reads/legacy/listCollectionObjects-serverErrors.json index 3922713df9..ab469dfe30 100644 --- a/test/retryable_reads/listCollectionObjects-serverErrors.json +++ b/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMaster", + "description": "ListCollectionObjects succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMasterNoSlaveOk", + "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMasterOrSecondary", + "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollectionObjects fails after two NotMaster errors", + "description": "ListCollectionObjects fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollectionObjects fails after NotMaster when retryReads is false", + "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollectionObjects.json b/test/retryable_reads/legacy/listCollectionObjects.json similarity index 98% rename from test/retryable_reads/listCollectionObjects.json rename to test/retryable_reads/legacy/listCollectionObjects.json index 1f537b743f..1fb0f18437 100644 
--- a/test/retryable_reads/listCollectionObjects.json +++ b/test/retryable_reads/legacy/listCollectionObjects.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollections-serverErrors.json b/test/retryable_reads/legacy/listCollections-serverErrors.json similarity index 95% rename from test/retryable_reads/listCollections-serverErrors.json rename to test/retryable_reads/legacy/listCollections-serverErrors.json index 6972073b18..def9ac4595 100644 --- a/test/retryable_reads/listCollections-serverErrors.json +++ b/test/retryable_reads/legacy/listCollections-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollections succeeds after NotMaster", + "description": "ListCollections succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollections succeeds after NotMasterNoSlaveOk", + "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollections succeeds after NotMasterOrSecondary", + "description": "ListCollections succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollections fails after two NotMaster errors", + "description": "ListCollections fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollections fails after NotMaster when retryReads is false", + "description": "ListCollections fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollections.json b/test/retryable_reads/legacy/listCollections.json similarity index 98% rename from test/retryable_reads/listCollections.json rename to test/retryable_reads/legacy/listCollections.json index a6b452e64f..2427883621 100644 --- a/test/retryable_reads/listCollections.json +++ b/test/retryable_reads/legacy/listCollections.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listDatabaseNames-serverErrors.json b/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json similarity index 95% rename from test/retryable_reads/listDatabaseNames-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseNames-serverErrors.json index 11faf58bf0..1dd8e4415a 100644 --- a/test/retryable_reads/listDatabaseNames-serverErrors.json +++ b/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabaseNames succeeds after NotMaster", + "description": "ListDatabaseNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabaseNames succeeds after NotMasterNoSlaveOk", + "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": 
"ListDatabaseNames succeeds after NotMasterOrSecondary", + "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabaseNames fails after two NotMaster errors", + "description": "ListDatabaseNames fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabaseNames fails after NotMaster when retryReads is false", + "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabaseNames.json b/test/retryable_reads/legacy/listDatabaseNames.json similarity index 98% rename from test/retryable_reads/listDatabaseNames.json rename to test/retryable_reads/legacy/listDatabaseNames.json index b35f7ab185..b431f57016 100644 --- a/test/retryable_reads/listDatabaseNames.json +++ b/test/retryable_reads/legacy/listDatabaseNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listDatabaseObjects-serverErrors.json b/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json similarity index 95% rename from test/retryable_reads/listDatabaseObjects-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json index 38082f2e28..bc497bb088 100644 --- a/test/retryable_reads/listDatabaseObjects-serverErrors.json +++ b/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMaster", + "description": "ListDatabaseObjects succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMasterNoSlaveOk", + "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMasterOrSecondary", + "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabaseObjects fails after two NotMaster errors", + "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabaseObjects fails after NotMaster when retryReads is false", + "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabaseObjects.json b/test/retryable_reads/legacy/listDatabaseObjects.json similarity index 98% rename from test/retryable_reads/listDatabaseObjects.json rename to test/retryable_reads/legacy/listDatabaseObjects.json index cbd2c6763a..267fe921ca 100644 --- a/test/retryable_reads/listDatabaseObjects.json +++ b/test/retryable_reads/legacy/listDatabaseObjects.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git 
a/test/retryable_reads/listDatabases-serverErrors.json b/test/retryable_reads/legacy/listDatabases-serverErrors.json similarity index 95% rename from test/retryable_reads/listDatabases-serverErrors.json rename to test/retryable_reads/legacy/listDatabases-serverErrors.json index 4047f749ff..ed7bcbc398 100644 --- a/test/retryable_reads/listDatabases-serverErrors.json +++ b/test/retryable_reads/legacy/listDatabases-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMaster", + "description": "ListDatabases succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMasterNoSlaveOk", + "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMasterOrSecondary", + "description": "ListDatabases succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabases fails after two NotMaster errors", + "description": "ListDatabases fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabases fails after NotMaster when retryReads is false", + "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabases.json b/test/retryable_reads/legacy/listDatabases.json similarity index 98% rename from test/retryable_reads/listDatabases.json rename to test/retryable_reads/legacy/listDatabases.json index 3cb8bbd083..69ef9788f8 100644 --- a/test/retryable_reads/listDatabases.json +++ b/test/retryable_reads/legacy/listDatabases.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listIndexNames-serverErrors.json b/test/retryable_reads/legacy/listIndexNames-serverErrors.json similarity index 96% rename from test/retryable_reads/listIndexNames-serverErrors.json rename to test/retryable_reads/legacy/listIndexNames-serverErrors.json index 1a9ba83bc6..2d3265ec85 100644 --- a/test/retryable_reads/listIndexNames-serverErrors.json +++ b/test/retryable_reads/legacy/listIndexNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -97,7 +98,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMaster", + "description": "ListIndexNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -136,7 +137,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMasterNoSlaveOk", + "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -175,7 +176,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMasterOrSecondary", + "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -448,7 +449,7 @@ ] }, { - "description": "ListIndexNames fails after two NotMaster errors", + "description": "ListIndexNames fails 
after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -488,7 +489,7 @@ ] }, { - "description": "ListIndexNames fails after NotMaster when retryReads is false", + "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listIndexNames.json b/test/retryable_reads/legacy/listIndexNames.json similarity index 92% rename from test/retryable_reads/listIndexNames.json rename to test/retryable_reads/legacy/listIndexNames.json index ef2a6d7306..fbdb420f8a 100644 --- a/test/retryable_reads/listIndexNames.json +++ b/test/retryable_reads/legacy/listIndexNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -30,7 +31,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -61,7 +62,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -69,7 +70,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -104,7 +105,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -136,7 +137,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -144,7 +145,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } diff --git a/test/retryable_reads/listIndexes-serverErrors.json b/test/retryable_reads/legacy/listIndexes-serverErrors.json similarity index 96% rename from test/retryable_reads/listIndexes-serverErrors.json rename to test/retryable_reads/legacy/listIndexes-serverErrors.json index 16b61d535d..25c5b0e448 100644 --- a/test/retryable_reads/listIndexes-serverErrors.json +++ b/test/retryable_reads/legacy/listIndexes-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -97,7 +98,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMaster", + "description": "ListIndexes succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -136,7 +137,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMasterNoSlaveOk", + "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -175,7 +176,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMasterOrSecondary", + "description": "ListIndexes succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -448,7 +449,7 @@ ] }, { - "description": "ListIndexes fails after two NotMaster errors", + "description": "ListIndexes fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -488,7 +489,7 @@ ] }, { - "description": "ListIndexes fails after NotMaster when retryReads is false", + "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listIndexes.json b/test/retryable_reads/legacy/listIndexes.json 
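Beyond the rename, the listIndexNames.json hunks just above fix a real expectation bug: listIndexNames is a driver-side helper, not a server command, so command monitoring observes listIndexes on the wire. In terms of pymongo's public API the helper reduces to:

    from pymongo import MongoClient

    def list_index_names(collection):
        # Implemented on top of the listIndexes command, which is why the
        # corrected expectations assert "listIndexes" rather than
        # "listIndexNames".
        return [index["name"] for index in collection.list_indexes()]

    client = MongoClient()
    print(list_index_names(client["retryable-reads-tests"]["coll"]))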
similarity index 98% rename from test/retryable_reads/listIndexes.json rename to test/retryable_reads/legacy/listIndexes.json index f460ea7684..5cb620ae45 100644 --- a/test/retryable_reads/listIndexes.json +++ b/test/retryable_reads/legacy/listIndexes.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/mapReduce.json b/test/retryable_reads/legacy/mapReduce.json similarity index 98% rename from test/retryable_reads/mapReduce.json rename to test/retryable_reads/legacy/mapReduce.json index 9dc7a56f3c..9327a23052 100644 --- a/test/retryable_reads/mapReduce.json +++ b/test/retryable_reads/legacy/mapReduce.json @@ -10,8 +10,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json new file mode 100644 index 0000000000..2921d8a954 --- /dev/null +++ b/test/retryable_reads/unified/handshakeError.json @@ -0,0 +1,3079 @@ +{ + "description": "retryable reads handshake failures", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "client.listDatabases succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", 
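The new unified handshakeError.json file beginning above extends retryable reads to connection establishment. Each test arms failCommand against ping and saslContinue twice: the explicit ping consumes the first failure, and the read that follows hits the second during authentication of a freshly checked-out connection, yet must still succeed after a single retry. Notice that the expectations contain only one commandStartedEvent for the read itself: the first attempt dies in the handshake, before the command is ever sent. A sketch of that control flow with placeholder pool and error types (none of these names are pymongo's):

    class ConnectionHandshakeError(Exception):
        """Connection establishment or authentication failed."""


    def run_read(pool, read_op, retry_reads=True):
        try:
            conn = pool.checkout()  # hello + auth (saslContinue) run here
        except ConnectionHandshakeError:
            if not retry_reads:
                raise
            # Handshake failures are treated as retryable for reads: check
            # out a brand-new connection and try once more, producing the
            # extra connectionCheckOutStartedEvent the tests expect.
            conn = pool.checkout()
        return read_op(conn)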
+ "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + 
{ + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { 
+ "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": 
{} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + 
"ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": 
"runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + 
"commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.distinct succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "collection.distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + 
"events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + 
"failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + 
"commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_writes/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-errorLabels.json similarity index 81% rename from test/retryable_writes/bulkWrite-serverErrors.json rename to test/retryable_writes/legacy/bulkWrite-errorLabels.json index 79c81a583b..66c3ecb336 100644 --- a/test/retryable_writes/bulkWrite-serverErrors.json +++ b/test/retryable_writes/legacy/bulkWrite-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "BulkWrite succeeds after PrimarySteppedDown", + "description": "BulkWrite succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -105,7 +104,7 @@ } }, { - "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", + "description": "BulkWrite fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -113,12 +112,10 @@ }, "data": { "failCommands": [ - "insert" + "update" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -162,22 +159,17 @@ } }, "outcome": { + "error": true, "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ { "_id": 2, - "x": 23 + "x": 22 }, { "_id": 3, diff --git a/test/retryable_writes/legacy/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-serverErrors.json new file mode 100644 index 0000000000..1e6cc74c05 --- /dev/null +++ b/test/retryable_writes/legacy/bulkWrite-serverErrors.json @@ -0,0 +1,273 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "BulkWrite succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/bulkWrite.json b/test/retryable_writes/legacy/bulkWrite.json similarity index 100% rename from test/retryable_writes/bulkWrite.json rename to test/retryable_writes/legacy/bulkWrite.json diff --git a/test/retryable_writes/deleteMany.json b/test/retryable_writes/legacy/deleteMany.json similarity index 93% rename from test/retryable_writes/deleteMany.json rename to test/retryable_writes/legacy/deleteMany.json index 642ad11fb4..faa21c44f1 100644 --- a/test/retryable_writes/deleteMany.json +++ b/test/retryable_writes/legacy/deleteMany.json @@ -4,7 +4,8 @@ "minServerVersion": "3.6", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-errorLabels.json similarity index 69% rename from test/retryable_writes/deleteOne-serverErrors.json rename to test/retryable_writes/legacy/deleteOne-errorLabels.json index 9ef2bf2f29..c14692fd1a 100644 --- a/test/retryable_writes/deleteOne-serverErrors.json +++ b/test/retryable_writes/legacy/deleteOne-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "DeleteOne succeeds after PrimarySteppedDown", + "description": "DeleteOne succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ 
"delete" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -61,7 +60,7 @@ } }, { - "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", + "description": "DeleteOne fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -71,10 +70,8 @@ "failCommands": [ "delete" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -86,11 +83,18 @@ } }, "outcome": { + "error": true, "result": { - "deletedCount": 1 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ + { + "_id": 1, + "x": 11 + }, { "_id": 2, "x": 22 diff --git a/test/retryable_writes/legacy/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-serverErrors.json new file mode 100644 index 0000000000..a1a27838de --- /dev/null +++ b/test/retryable_writes/legacy/deleteOne-serverErrors.json @@ -0,0 +1,153 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "DeleteOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne fails with RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/deleteOne.json b/test/retryable_writes/legacy/deleteOne.json similarity index 100% rename from test/retryable_writes/deleteOne.json rename to test/retryable_writes/legacy/deleteOne.json diff --git a/test/retryable_writes/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json similarity index 71% rename from test/retryable_writes/findOneAndDelete-serverErrors.json rename to test/retryable_writes/legacy/findOneAndDelete-errorLabels.json index 
d72d1a05ba..60e6e0a7bc 100644 --- a/test/retryable_writes/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "FindOneAndDelete succeeds after PrimarySteppedDown", + "description": "FindOneAndDelete succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -67,7 +66,7 @@ } }, { - "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", + "description": "FindOneAndDelete fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -77,10 +76,8 @@ "failCommands": [ "findAndModify" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -97,12 +94,18 @@ } }, "outcome": { + "error": true, "result": { - "_id": 1, - "x": 11 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ + { + "_id": 1, + "x": 11 + }, { "_id": 2, "x": 22 diff --git a/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json new file mode 100644 index 0000000000..c18b63f456 --- /dev/null +++ b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json @@ -0,0 +1,170 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndDelete succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + 
], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndDelete.json b/test/retryable_writes/legacy/findOneAndDelete.json similarity index 100% rename from test/retryable_writes/findOneAndDelete.json rename to test/retryable_writes/legacy/findOneAndDelete.json diff --git a/test/retryable_writes/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json similarity index 75% rename from test/retryable_writes/findOneAndReplace-serverErrors.json rename to test/retryable_writes/legacy/findOneAndReplace-errorLabels.json index d5d25e1d78..afa2f47af4 100644 --- a/test/retryable_writes/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "FindOneAndReplace succeeds after PrimarySteppedDown", + "description": "FindOneAndReplace succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -71,7 +70,7 @@ } }, { - "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", + "description": "FindOneAndReplace fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -81,10 +80,8 @@ "failCommands": [ "findAndModify" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -101,15 +98,17 @@ } }, "outcome": { + "error": true, "result": { - "_id": 1, - "x": 11 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ { "_id": 1, - "x": 111 + "x": 11 }, { "_id": 2, diff --git a/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json new file mode 100644 index 0000000000..944a3af848 --- /dev/null +++ b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json @@ -0,0 +1,178 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndReplace succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + 
"_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndReplace.json b/test/retryable_writes/legacy/findOneAndReplace.json similarity index 100% rename from test/retryable_writes/findOneAndReplace.json rename to test/retryable_writes/legacy/findOneAndReplace.json diff --git a/test/retryable_writes/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json similarity index 76% rename from test/retryable_writes/findOneAndUpdate-serverErrors.json rename to test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json index b9f57cd825..19b3a9e771 100644 --- a/test/retryable_writes/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", + "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -72,7 +71,7 @@ } }, { - "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", + "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -82,10 +81,8 @@ "failCommands": [ "findAndModify" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -103,15 +100,17 @@ } }, "outcome": { + "error": true, "result": { - "_id": 1, - "x": 11 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": 
{ "data": [ { "_id": 1, - "x": 12 + "x": 11 }, { "_id": 2, diff --git a/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json new file mode 100644 index 0000000000..e83a610615 --- /dev/null +++ b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json @@ -0,0 +1,181 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndUpdate.json b/test/retryable_writes/legacy/findOneAndUpdate.json similarity index 100% rename from test/retryable_writes/findOneAndUpdate.json rename to test/retryable_writes/legacy/findOneAndUpdate.json diff --git a/test/retryable_writes/insertMany-serverErrors.json b/test/retryable_writes/legacy/insertMany-errorLabels.json similarity index 73% rename from test/retryable_writes/insertMany-serverErrors.json rename to test/retryable_writes/legacy/insertMany-errorLabels.json index 773ad9307f..65fd377fa6 100644 --- a/test/retryable_writes/insertMany-serverErrors.json +++ b/test/retryable_writes/legacy/insertMany-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + 
"sharded", + "load-balanced" ] } ], @@ -21,7 +17,7 @@ ], "tests": [ { - "description": "InsertMany succeeds after PrimarySteppedDown", + "description": "InsertMany succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -31,7 +27,10 @@ "failCommands": [ "insert" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -78,7 +77,7 @@ } }, { - "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", + "description": "InsertMany fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -88,10 +87,8 @@ "failCommands": [ "insert" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -113,25 +110,17 @@ } }, "outcome": { + "error": true, "result": { - "insertedIds": { - "0": 2, - "1": 3 - } + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ { "_id": 1, "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 } ] } diff --git a/test/retryable_writes/legacy/insertMany-serverErrors.json b/test/retryable_writes/legacy/insertMany-serverErrors.json new file mode 100644 index 0000000000..fe8dbf4a62 --- /dev/null +++ b/test/retryable_writes/legacy/insertMany-serverErrors.json @@ -0,0 +1,197 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + } + ], + "tests": [ + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertMany fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + 
"options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/insertMany.json b/test/retryable_writes/legacy/insertMany.json similarity index 100% rename from test/retryable_writes/insertMany.json rename to test/retryable_writes/legacy/insertMany.json diff --git a/test/retryable_writes/legacy/insertOne-errorLabels.json b/test/retryable_writes/legacy/insertOne-errorLabels.json new file mode 100644 index 0000000000..d90ac5dfbd --- /dev/null +++ b/test/retryable_writes/legacy/insertOne-errorLabels.json @@ -0,0 +1,91 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "data": [], + "tests": [ + { + "description": "InsertOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/legacy/insertOne-serverErrors.json similarity index 79% rename from test/retryable_writes/insertOne-serverErrors.json rename to test/retryable_writes/legacy/insertOne-serverErrors.json index 3c3c5b1dc3..5179a6ab75 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/legacy/insertOne-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -70,7 +71,54 @@ } }, { - "description": "InsertOne succeeds after NotMaster", + "description": "InsertOne fails after connection failure when retryWrites option is false", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -81,6 +129,9 @@ "insert" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -116,7 +167,7 @@ } }, { - "description": "InsertOne succeeds after NotMasterOrSecondary", + 
"description": "InsertOne succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -127,6 +178,9 @@ "insert" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -162,7 +216,7 @@ } }, { - "description": "InsertOne succeeds after NotMasterNoSlaveOk", + "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -173,6 +227,9 @@ "insert" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -219,6 +276,9 @@ "insert" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -265,6 +325,9 @@ "insert" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -311,6 +374,9 @@ "insert" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -357,6 +423,9 @@ "insert" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -403,6 +472,9 @@ "insert" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -449,6 +521,9 @@ "insert" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -495,6 +570,9 @@ "insert" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -541,6 +619,58 @@ "insert" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "result": { + "insertedId": 3 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertOne succeeds after ExceededTimeLimit", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -601,6 +731,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -626,6 +761,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, "errmsg": "Replication is being shut down" @@ -674,6 +812,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, "errmsg": "Replication is being shut down" @@ -722,6 +863,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, "errmsg": "Replication is being shut down" @@ -770,6 +914,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" @@ -818,6 +965,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" @@ -835,6 +985,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -881,6 +1036,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, 
"collection": { "data": [ { @@ -931,6 +1091,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -948,6 +1113,50 @@ ] } } + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/insertOne.json b/test/retryable_writes/legacy/insertOne.json similarity index 100% rename from test/retryable_writes/insertOne.json rename to test/retryable_writes/legacy/insertOne.json diff --git a/test/retryable_writes/replaceOne-serverErrors.json b/test/retryable_writes/legacy/replaceOne-errorLabels.json similarity index 75% rename from test/retryable_writes/replaceOne-serverErrors.json rename to test/retryable_writes/legacy/replaceOne-errorLabels.json index aac7b2f394..6029b875dc 100644 --- a/test/retryable_writes/replaceOne-serverErrors.json +++ b/test/retryable_writes/legacy/replaceOne-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "ReplaceOne succeeds after PrimarySteppedDown", + "description": "ReplaceOne succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -71,7 +70,7 @@ } }, { - "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", + "description": "ReplaceOne fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -81,10 +80,8 @@ "failCommands": [ "update" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -100,16 +97,17 @@ } }, "outcome": { + "error": true, "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ { "_id": 1, - "x": 111 + "x": 11 }, { "_id": 2, diff --git a/test/retryable_writes/legacy/replaceOne-serverErrors.json b/test/retryable_writes/legacy/replaceOne-serverErrors.json new file mode 100644 index 0000000000..6b35722e12 --- /dev/null +++ b/test/retryable_writes/legacy/replaceOne-serverErrors.json @@ -0,0 +1,177 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "ReplaceOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/replaceOne.json b/test/retryable_writes/legacy/replaceOne.json similarity index 100% rename from test/retryable_writes/replaceOne.json rename to test/retryable_writes/legacy/replaceOne.json diff --git a/test/retryable_writes/updateMany.json b/test/retryable_writes/legacy/updateMany.json similarity index 95% rename from test/retryable_writes/updateMany.json rename to test/retryable_writes/legacy/updateMany.json index 14288c2860..46fef73e74 100644 --- a/test/retryable_writes/updateMany.json +++ b/test/retryable_writes/legacy/updateMany.json @@ -4,7 +4,8 @@ "minServerVersion": "3.6", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-errorLabels.json similarity index 75% rename from test/retryable_writes/updateOne-serverErrors.json rename to test/retryable_writes/legacy/updateOne-errorLabels.json index 6f6c55dd51..5bd00cde90 100644 --- a/test/retryable_writes/updateOne-serverErrors.json +++ b/test/retryable_writes/legacy/updateOne-errorLabels.json @@ -1,15 +1,11 @@ { "runOn": [ { - "minServerVersion": "4.0", + "minServerVersion": "4.3.1", "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -25,7 +21,7 @@ ], "tests": [ { - "description": "UpdateOne succeeds after PrimarySteppedDown", + "description": "UpdateOne succeeds with RetryableWriteError from server", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -35,7 +31,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { 
@@ -72,7 +71,7 @@ } }, { - "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", + "description": "UpdateOne fails if server does not return RetryableWriteError", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -82,10 +81,8 @@ "failCommands": [ "update" ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } + "errorCode": 11600, + "errorLabels": [] } }, "operation": { @@ -102,16 +99,17 @@ } }, "outcome": { + "error": true, "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 + "errorLabelsOmit": [ + "RetryableWriteError" + ] }, "collection": { "data": [ { "_id": 1, - "x": 12 + "x": 11 }, { "_id": 2, diff --git a/test/retryable_writes/legacy/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-serverErrors.json new file mode 100644 index 0000000000..cf274f57e0 --- /dev/null +++ b/test/retryable_writes/legacy/updateOne-serverErrors.json @@ -0,0 +1,180 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topology": [ + "sharded", + "load-balanced" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "UpdateOne succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/updateOne.json b/test/retryable_writes/legacy/updateOne.json similarity index 100% rename from test/retryable_writes/updateOne.json rename to test/retryable_writes/legacy/updateOne.json diff --git a/test/retryable_writes/unified/bulkWrite-serverErrors.json 
b/test/retryable_writes/unified/bulkWrite-serverErrors.json new file mode 100644 index 0000000000..23cf2869a6 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -0,0 +1,205 @@ +{ + "description": "retryable-writes bulkWrite serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds after retryable writeConcernError in first batch", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3 + } + }, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 2 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json new file mode 100644 index 0000000000..df37bd7232 --- /dev/null +++ b/test/retryable_writes/unified/handshakeError.json @@ -0,0 +1,1797 @@ +{ + "description": "retryable writes handshake failures", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client", + 
"useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "collection.insertOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + 
"commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { 
+ "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + 
"x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndUpdate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": 
"collection.findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } 
+ } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-noWritesPerformedError.json b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json new file mode 100644 index 0000000000..3194e91c5c --- /dev/null +++ b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json @@ -0,0 +1,90 @@ +{ + "description": "retryable-writes insertOne noWritesPerformedErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "no-writes-performed-collection" + } + } + ], + "tests": [ + { + "description": "InsertOne fails after NoWritesPerformed error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 64, + "errorLabels": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 64, + "errorLabelsContain": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "no-writes-performed-collection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json b/test/retryable_writes/unified/insertOne-serverErrors.json new file mode 100644 index 0000000000..77245a8197 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -0,0 +1,173 @@ +{ + "description": "retryable-writes insertOne serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable writeConcernError", + 
"runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json new file mode 100644 index 0000000000..007e514bd7 --- /dev/null +++ b/test/run_command/unified/runCommand.json @@ -0,0 +1,635 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always 
gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 
"collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + "arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + "commandName": 
"ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCursorCommand.json b/test/run_command/unified/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/test/run_command/unified/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + 
}, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor" + 
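
The runCursorCommand cases exercise a database-level helper that runs a cursor-returning command and iterates it: the initial command and every `getMore` reuse one implicit or explicit session, non-cursor replies are rejected client-side, and in load-balanced mode the pinned connection is returned to the pool only once the cursor is exhausted or closed. PyMongo exposes this as `Database.cursor_command`; treat the exact signature in this sketch as an assumption, and the names as placeholders:

```python
# Hedged sketch of the runCursorCommand behavior tested above.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client.db

# The initial command and every subsequent getMore reuse one session,
# so the server sees a single lsid for the cursor's whole lifetime.
with client.start_session() as session:
    cursor = db.cursor_command(
        {"find": "collection", "batchSize": 2}, session=session
    )
    for doc in cursor:  # getMores are issued transparently
        print(doc)

# A reply that is not in cursor form ({cursor: {id, firstBatch, ns}})
# raises a client-side error, as in the "errors if the command response
# is not a cursor" case above.
```
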
}, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": "collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, 
+ "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", + "batchSize": 2, + "command": { + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json new file mode 100644 index 0000000000..dd8f7fc51e --- /dev/null +++ b/test/sdam_monitoring/discovered_standalone.json @@ -0,0 +1,105 @@ +{ + "description": "Monitoring a discovered standalone connection", + "uri": "mongodb://a:27017/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": 
"Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/load_balancer.json b/test/sdam_monitoring/load_balancer.json new file mode 100644 index 0000000000..09b1537193 --- /dev/null +++ b/test/sdam_monitoring/load_balancer.json @@ -0,0 +1,93 @@ +{ + "description": "Monitoring a load balancer", + "uri": "mongodb://a:27017/?loadBalanced=true", + "phases": [ + { + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 2f398a85f0..950e32efe1 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -8,16 +8,18 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "setVersion": 1, "primary": "b:27017", "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], @@ -83,7 +85,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "b:27017", @@ -122,7 +125,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "b:27017", @@ -134,7 +138,7 @@ "arbiters": [], "hosts": [], "passives": [], - "type": "Unknown" + "type": "PossiblePrimary" } ] } diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index 6c0d8819d0..2ad94d6e6a 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -8,15 +8,17 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "setVersion": 1, "primary": "a:27017", "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], @@ -82,7 +84,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "a:27017", diff --git a/test/sdam_monitoring/replica_set_with_removal.json 
b/test/sdam_monitoring/replica_set_with_removal.json index a14456cdba..ae28faa30c 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -3,30 +3,7 @@ "uri": "mongodb://a,b/", "phases": [ { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "ismaster": true, - "setName": "rs", - "setVersion": 1, - "primary": "a:27017", - "hosts": [ - "a:27017" - ], - "minWireVersion": 0, - "maxWireVersion": 4 - } - ], - [ - "b:27017", - { - "ok": 1, - "ismaster": true - } - ] - ], + "responses": [], "outcome": { "events": [ { @@ -73,7 +50,39 @@ "topologyId": "42", "address": "b:27017" } - }, + } + ] + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true + } + ] + ], + "outcome": { + "events": [ { "server_description_changed_event": { "topologyId": "42", diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index 0afe0d1a42..401c5d99c5 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json @@ -1,149 +1,152 @@ { - "description": "Monitoring a topology that is required to be a replica set", - "phases": [ - { - "outcome": { - "events": [ - { - "topology_opening_event": { - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "ReplicaSetNoPrimary" - }, - "previousDescription": { - "servers": [], - "topologyType": "Unknown" - }, - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "a:27017", - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "b:27017", - "topologyId": "42" - } - }, - { - "server_description_changed_event": { - "address": "a:27017", - "newDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [ - "a:27017", - "b:27017" - ], - "passives": [], - "primary": "a:27017", - "setName": "rs", - "type": "RSPrimary" - }, - "previousDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [ - "a:27017", - "b:27017" - ], - "passives": [], - "primary": "a:27017", - "setName": "rs", - "type": "RSPrimary" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "setName": "rs", - "topologyType": "ReplicaSetWithPrimary" - }, - "previousDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "ReplicaSetNoPrimary" - }, - "topologyId": "42" - } - } + "description": "Monitoring a topology that is required to be a replica set", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + 
"helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - }, - "responses": [ - [ - "a:27017", - { - "hosts": [ - "a:27017", - "b:27017" - ], - "ismaster": true, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1, - "primary": "a:27017", - "setName": "rs", - "setVersion": 1.0 - } + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - ] - } - ], - "uri": "mongodb://a,b/?replicaSet=rs" + }, + "newDescription": { + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + } + ] + } + } + ] } diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 1ca3c3c24d..821a1525d4 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -1,104 +1,105 @@ { - "description": "Monitoring a standalone connection", - "phases": [ - { - "outcome": { - "events": [ - { - "topology_opening_event": { - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "Single" - }, - "previousDescription": { - "servers": [], - "topologyType": "Unknown" - }, - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "a:27017", - "topologyId": "42" - } - }, - { - "server_description_changed_event": { - "address": "a:27017", - "newDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Standalone" - }, - "previousDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - 
"type": "Unknown" - }, - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Standalone" - } - ], - "topologyType": "Single" - }, - "previousDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "Single" - }, - "topologyId": "42" - } - } + "description": "Monitoring a direct connection", + "uri": "mongodb://a:27017/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - }, - "responses": [ - [ - "a:27017", - { - "ismaster": true, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1 - } + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - ] - } - ], - "uri": "mongodb://a:27017" + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] } diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json new file mode 100644 index 0000000000..5958e2d26c --- /dev/null +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -0,0 +1,115 @@ +{ + "description": "Monitoring a direct connection - suppress update events for equal server descriptions", + "uri": "mongodb://a:27017/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + 
"server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/server_selection/in_window/equilibrium.json b/test/server_selection/in_window/equilibrium.json new file mode 100644 index 0000000000..c5f177d49b --- /dev/null +++ b/test/server_selection/in_window/equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 5 + }, + { + "address": "b:27017", + "operation_count": 5 + }, + { + "address": "c:27017", + "operation_count": 5 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/many-choices.json b/test/server_selection/in_window/many-choices.json new file mode 100644 index 0000000000..7e940513ef --- /dev/null +++ b/test/server_selection/in_window/many-choices.json @@ -0,0 +1,106 @@ +{ + "description": "Selections from many choices occur at correct frequencies", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "d:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "e:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "f:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "g:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "i:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 5 + }, + { + "address": "c:27017", + "operation_count": 5 + }, + { + "address": "d:27017", + "operation_count": 10 + }, + { + "address": "e:27017", + "operation_count": 10 + }, + { + "address": "f:27017", + "operation_count": 20 + }, + { + "address": "g:27017", + "operation_count": 20 + }, + { + "address": "h:27017", + "operation_count": 50 + }, + { + "address": "i:27017", + "operation_count": 60 + } + ], + "iterations": 10000, + "outcome": { + "tolerance": 0.03, + "expected_frequencies": { + "a:27017": 0.22, + "b:27017": 0.18, + "c:27017": 0.18, + "d:27017": 0.125, + "e:27017": 0.125, + 
"f:27017": 0.074, + "g:27017": 0.074, + "h:27017": 0.0277, + "i:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/one-least-two-tied.json b/test/server_selection/in_window/one-least-two-tied.json new file mode 100644 index 0000000000..ed7526e716 --- /dev/null +++ b/test/server_selection/in_window/one-least-two-tied.json @@ -0,0 +1,46 @@ +{ + "description": "Least operations gets most selections, two tied share the rest", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 16 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.165, + "b:27017": 0.66, + "c:27017": 0.165 + } + } +} diff --git a/test/server_selection/in_window/rs-equilibrium.json b/test/server_selection/in_window/rs-equilibrium.json new file mode 100644 index 0000000000..61c6687e50 --- /dev/null +++ b/test/server_selection/in_window/rs-equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 6 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 6 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/rs-three-choices.json b/test/server_selection/in_window/rs-three-choices.json new file mode 100644 index 0000000000..3fdc15205c --- /dev/null +++ b/test/server_selection/in_window/rs-three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/three-choices.json b/test/server_selection/in_window/three-choices.json new file mode 100644 index 0000000000..7b5b414549 --- /dev/null +++ b/test/server_selection/in_window/three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": 
"a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-choices.json b/test/server_selection/in_window/two-choices.json new file mode 100644 index 0000000000..2c7a605d8d --- /dev/null +++ b/test/server_selection/in_window/two-choices.json @@ -0,0 +1,36 @@ +{ + "description": "Better of two choices always selected", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 5 + } + ], + "iterations": 100, + "outcome": { + "tolerance": 0, + "expected_frequencies": { + "a:27017": 1, + "b:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-least.json b/test/server_selection/in_window/two-least.json new file mode 100644 index 0000000000..73214fc647 --- /dev/null +++ b/test/server_selection/in_window/two-least.json @@ -0,0 +1,46 @@ +{ + "description": "Two tied for least operations share all selections", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 10 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.5, + "b:27017": 0.5, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/rtt/first_value.json b/test/server_selection/rtt/first_value.json index 2e92195606..421944da36 100644 --- a/test/server_selection/rtt/first_value.json +++ b/test/server_selection/rtt/first_value.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": "NULL", - "new_avg_rtt": 10, - "new_rtt_ms": 10 + "avg_rtt_ms": "NULL", + "new_rtt_ms": 10, + "new_avg_rtt": 10 } diff --git a/test/server_selection/rtt/first_value_zero.json b/test/server_selection/rtt/first_value_zero.json index 1953a742a4..d5bfc41b25 100644 --- a/test/server_selection/rtt/first_value_zero.json +++ b/test/server_selection/rtt/first_value_zero.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": "NULL", - "new_avg_rtt": 0, - "new_rtt_ms": 0 + "avg_rtt_ms": "NULL", + "new_rtt_ms": 0, + "new_avg_rtt": 0 } diff --git a/test/server_selection/rtt/value_test_1.json b/test/server_selection/rtt/value_test_1.json index bfa3eb32bf..ed6a80ce29 100644 --- a/test/server_selection/rtt/value_test_1.json +++ b/test/server_selection/rtt/value_test_1.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 0, - "new_avg_rtt": 1.0, - "new_rtt_ms": 5 + "avg_rtt_ms": 0, + "new_rtt_ms": 5, + "new_avg_rtt": 1 } diff --git a/test/server_selection/rtt/value_test_2.json 
b/test/server_selection/rtt/value_test_2.json index 0614cc3f03..ccb5a0173b 100644 --- a/test/server_selection/rtt/value_test_2.json +++ b/test/server_selection/rtt/value_test_2.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 3.1, - "new_avg_rtt": 9.68, - "new_rtt_ms": 36 + "avg_rtt_ms": 3.1, + "new_rtt_ms": 36, + "new_avg_rtt": 9.68 } diff --git a/test/server_selection/rtt/value_test_3.json b/test/server_selection/rtt/value_test_3.json index c42edc1087..6921c94d36 100644 --- a/test/server_selection/rtt/value_test_3.json +++ b/test/server_selection/rtt/value_test_3.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 9.12, - "new_avg_rtt": 9.12, - "new_rtt_ms": 9.12 + "avg_rtt_ms": 9.12, + "new_rtt_ms": 9.12, + "new_avg_rtt": 9.12 } diff --git a/test/server_selection/rtt/value_test_4.json b/test/server_selection/rtt/value_test_4.json index f65b362eca..d9ce3800b8 100644 --- a/test/server_selection/rtt/value_test_4.json +++ b/test/server_selection/rtt/value_test_4.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 1, - "new_avg_rtt": 200.8, - "new_rtt_ms": 1000 + "avg_rtt_ms": 1, + "new_rtt_ms": 1000, + "new_avg_rtt": 200.8 } diff --git a/test/server_selection/rtt/value_test_5.json b/test/server_selection/rtt/value_test_5.json index 4c86e05a24..9ae33bc143 100644 --- a/test/server_selection/rtt/value_test_5.json +++ b/test/server_selection/rtt/value_test_5.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 0, - "new_avg_rtt": 0.05, - "new_rtt_ms": 0.25 + "avg_rtt_ms": 0, + "new_rtt_ms": 0.25, + "new_avg_rtt": 0.05 } diff --git a/test/server_selection/server_selection/LoadBalanced/read/Nearest.json b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json new file mode 100644 index 0000000000..76fa336d55 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Primary.json b/test/server_selection/server_selection/LoadBalanced/read/Primary.json new file mode 100644 index 0000000000..5a4a0aa93a --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json new file mode 100644 index 0000000000..9aa151cd06 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + 
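
The rtt value tests above encode the moving average a driver keeps for each server's round-trip time: the first sample replaces a "NULL" average outright, and every later sample is blended as new_avg = 0.2 * new_rtt + 0.8 * old_avg (for example, value_test_2: 0.2 * 36 + 0.8 * 3.1 = 9.68). A few lines reproduce the expected values:

```python
# Sketch of the exponentially weighted moving average the rtt value
# tests encode: alpha = 0.2, and the first sample seeds the average.
ALPHA = 0.2

def update_avg_rtt(avg_rtt_ms, new_rtt_ms):
    if avg_rtt_ms is None:  # the "NULL" case in first_value*.json
        return new_rtt_ms
    return ALPHA * new_rtt_ms + (1 - ALPHA) * avg_rtt_ms

assert update_avg_rtt(None, 10) == 10               # first_value.json
assert update_avg_rtt(0, 5) == 1.0                  # value_test_1.json
assert round(update_avg_rtt(3.1, 36), 2) == 9.68    # value_test_2.json
assert round(update_avg_rtt(1, 1000), 1) == 200.8   # value_test_4.json
assert round(update_avg_rtt(0, 0.25), 2) == 0.05    # value_test_5.json
```
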
"operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Secondary.json b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json new file mode 100644 index 0000000000..c49e30370b --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json new file mode 100644 index 0000000000..18e46877b4 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Nearest.json b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json new file mode 100644 index 0000000000..e52e343332 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Primary.json b/test/server_selection/server_selection/LoadBalanced/write/Primary.json new file mode 100644 index 0000000000..9061b25208 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": 
"LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json new file mode 100644 index 0000000000..5c94dc410d --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Secondary.json b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json new file mode 100644 index 0000000000..5493867e12 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json new file mode 100644 index 0000000000..f7905f1d5f --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json index 9677494ad6..aa48679e86 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - 
"avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json index be0d571fcd..1fcfd52a47 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json 
b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json index 9aac3327d1..b72895d8a8 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json new file mode 100644 index 0000000000..4d286af830 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json new file mode 100644 index 0000000000..bf9c70b420 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json index a4102fe823..f0f3fa9ea1 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json @@ -1,32 +1,29 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Primary", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - 
} + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json index 53b66c0b82..f87ef4f617 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json @@ -1,58 +1,58 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json index 6a3702d00d..ee96229927 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": 
"ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json index 325a7550bb..3b8f1e97cd 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json index 9012b76111..c3142ec115 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - 
"type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json index bad36648bf..a2c18bb7d2 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json index 3cfe980786..b319918e92 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc", - "rack": "one" 
- }, - { - "other_tag": "doesntexist" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "two", + "data_center": "sf" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "sf", - "rack": "two" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json index 2b0cfb7eec..8f64d95ecb 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc", - "rack": "one" - }, - { - "other_tag": "doesntexist" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "two", + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "two" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json index 
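
The two Secondary_multi_tags fixtures above pin down the ordered-tag-set rule from the server selection spec: tag sets are tried in the order given, the first set that matches at least one otherwise-eligible server is used, and later sets (here the unmatchable {"other_tag": "doesntexist"}) are then ignored. Below is a minimal illustrative sketch of that rule, not the driver's actual implementation; the helper name and the dict-based server shape are assumptions chosen to mirror the fixture JSON.

```python
# Illustrative sketch (not the driver's implementation) of the ordered
# tag_sets rule exercised by the Secondary_multi_tags fixtures.
def filter_by_tag_sets(servers, tag_sets):
    """Return the servers matching the first tag set that matches anything."""
    for tag_set in tag_sets or [{}]:
        matching = [
            s for s in servers
            if all(s.get("tags", {}).get(k) == v for k, v in tag_set.items())
        ]
        if matching:
            return matching  # first matching tag set wins; the rest are ignored
    return []

# Data taken from Secondary_multi_tags.json: only b:27017 carries both
# "data_center": "nyc" and "rack": "one", so it alone is suitable.
servers = [
    {"address": "b:27017", "tags": {"rack": "one", "data_center": "nyc"}},
    {"address": "c:27017", "tags": {"rack": "two", "data_center": "sf"}},
]
tag_sets = [{"data_center": "nyc", "rack": "one"}, {"other_tag": "doesntexist"}]
assert [s["address"] for s in filter_by_tag_sets(servers, tag_sets)] == ["b:27017"]
```
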
4d8bd18917..4931e1019a 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json index db2fb398d1..e136cf12a4 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json index 5975f770ce..cfe4965938 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json @@ -1,76 +1,76 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - 
"suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json index f174c1ba33..67296d434f 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json @@ -1,84 +1,84 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - 
"type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json index af890cd596..a3a85c9a83 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json @@ -1,42 +1,42 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json index de001ca0bc..8da1482e96 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json @@ -1,58 +1,55 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Primary", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" 
- }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json index b413745dd2..306171f3a2 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json @@ -1,58 +1,58 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json index 12b20040a8..722f1cfb1a 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json +++ 
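
The in_latency_window arrays in the Nearest and Nearest_multiple fixtures above are consistent with the spec's default localThresholdMS of 15 ms: once suitable servers are chosen, only those within 15 ms of the fastest remain candidates. A minimal sketch of that filter follows, assuming the default threshold; the function name and server shape are illustrative only.

```python
# Sketch of the latency-window filter implied by these fixtures, assuming
# the server selection spec's default localThresholdMS of 15 ms.
def in_latency_window(suitable, local_threshold_ms=15):
    if not suitable:
        return []
    fastest = min(s["avg_rtt_ms"] for s in suitable)
    return [s for s in suitable if s["avg_rtt_ms"] <= fastest + local_threshold_ms]

# Mirrors Nearest_multiple.json: of RTTs 10, 20 and 100 ms, the 10 ms and
# 20 ms servers stay in the window (20 <= 10 + 15) while 100 ms drops out.
suitable = [{"avg_rtt_ms": r} for r in (10, 20, 100)]
assert [s["avg_rtt_ms"] for s in in_latency_window(suitable)] == [10, 20]
```
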
b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json index faf3b70a6b..23864a278c 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + 
"tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json index fd549a2327..d07c24218d 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json index 948a728633..f893cc9f82 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": 
"nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json index b391864bdc..a74a2dbf33 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json @@ -1,52 +1,52 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "sf" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "sf" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git 
a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json index 27213d3591..1272180666 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json @@ -1,42 +1,42 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json index a57ee31be9..65ab3dc640 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + 
{ + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/Sharded/read/Nearest.json b/test/server_selection/server_selection/Sharded/read/Nearest.json new file mode 100644 index 0000000000..705a784a0b --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Primary.json b/test/server_selection/server_selection/Sharded/read/Primary.json new file mode 100644 index 0000000000..7a321be2bb --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json new file mode 100644 index 0000000000..e9bc1421f9 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Secondary.json b/test/server_selection/server_selection/Sharded/read/Secondary.json new file mode 100644 index 0000000000..49813f7b9e --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": 
"Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json index e45556f4cb..62fa13f297 100644 --- a/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json @@ -1,60 +1,45 @@ { - "in_latency_window": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "topology_description": { - "servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "type": "Sharded" + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] } diff --git a/test/server_selection/server_selection/Sharded/write/Nearest.json b/test/server_selection/server_selection/Sharded/write/Nearest.json new file mode 100644 index 0000000000..aef7f02ec7 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Primary.json b/test/server_selection/server_selection/Sharded/write/Primary.json new file mode 100644 index 0000000000..f6ce2e75c1 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + 
"avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json new file mode 100644 index 0000000000..25f56a5359 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Secondary.json b/test/server_selection/server_selection/Sharded/write/Secondary.json new file mode 100644 index 0000000000..1fa026f716 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json index 4262ce2efb..f9467472aa 100644 --- a/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json @@ -1,60 +1,45 @@ { - "in_latency_window": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "topology_description": { - "servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "type": "Sharded" + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": 
"g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] } diff --git a/test/server_selection/server_selection/Single/read/SecondaryPreferred.json b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json index 86c704c147..e60496dfdf 100644 --- a/test/server_selection/server_selection/Single/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json @@ -1,44 +1,44 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "type": "Single" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } } + ] } diff --git a/test/server_selection/server_selection/Single/write/SecondaryPreferred.json b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json index be8771caeb..34fe91d5a2 100644 --- a/test/server_selection/server_selection/Single/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json @@ -1,44 +1,44 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "type": "Single" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + 
} + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } } + ] } diff --git a/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json index ce0d8376b3..0ae8075fba 100644 --- a/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json @@ -1,17 +1,17 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [], - "type": "Unknown" - } + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/Unknown/read/ghost.json b/test/server_selection/server_selection/Unknown/read/ghost.json new file mode 100644 index 0000000000..76d3d774e8 --- /dev/null +++ b/test/server_selection/server_selection/Unknown/read/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json index 4d6f79b46b..a70eece62c 100644 --- a/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json @@ -1,17 +1,17 @@ { - "in_latency_window": [], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [], - "type": "Unknown" - } + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/Unknown/write/ghost.json b/test/server_selection/server_selection/Unknown/write/ghost.json new file mode 100644 index 0000000000..65caa4cd0a --- /dev/null +++ b/test/server_selection/server_selection/Unknown/write/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/sessions/dirty-session-errors.json b/test/sessions/dirty-session-errors.json deleted file mode 100644 index 9eccff0593..0000000000 --- a/test/sessions/dirty-session-errors.json +++ /dev/null @@ -1,523 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "session-tests", - "collection_name": "test", - 
"data": [ - { - "_id": 1 - } - ], - "tests": [ - { - "description": "Clean explicit session is not discarded", - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0" - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - }, - "lsid": "session0" - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Clean implicit session is not discarded", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Dirty explicit session is discarded", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (read)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ] - }, - "error": true - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json new file mode 100644 index 0000000000..361ea83d7b --- /dev/null +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -0,0 +1,968 @@ +{ + "description": 
"driver-sessions-dirty-session-errors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Dirty explicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + 
] + } + ] + }, + { + "description": "Dirty explicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ 
+ { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read not returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": 1 + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-server-support.json b/test/sessions/driver-sessions-server-support.json new file mode 100644 index 0000000000..55312b32eb --- /dev/null +++ b/test/sessions/driver-sessions-server-support.json @@ -0,0 +1,256 @@ +{ + "description": "driver-sessions-server-support", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 
1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/implicit-sessions-default-causal-consistency.json b/test/sessions/implicit-sessions-default-causal-consistency.json new file mode 100644 index 0000000000..517c8ebc63 --- /dev/null +++ b/test/sessions/implicit-sessions-default-causal-consistency.json @@ -0,0 +1,318 @@ +{ + "description": "implicit sessions default causal consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"implicit-cc-tests" + } + }, + { + "collection": { + "id": "collectionDefault", + "database": "database0", + "collectionName": "coll-default" + } + }, + { + "collection": { + "id": "collectionSnapshot", + "database": "database0", + "collectionName": "coll-snapshot", + "collectionOptions": { + "readConcern": { + "level": "snapshot" + } + } + } + }, + { + "collection": { + "id": "collectionlinearizable", + "database": "database0", + "collectionName": "coll-linearizable", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll-default", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "default" + } + ] + }, + { + "collectionName": "coll-snapshot", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "snapshot" + } + ] + }, + { + "collectionName": "coll-linearizable", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "tests": [ + { + "description": "readConcern is not sent on retried read in implicit session when readConcern level is not specified", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionDefault", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "default" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is snapshot", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionSnapshot", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "snapshot" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is linearizable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + 
}, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionlinearizable", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-not-supported-client-error.json b/test/sessions/snapshot-sessions-not-supported-client-error.json new file mode 100644 index 0000000000..208e4cfe63 --- /dev/null +++ b/test/sessions/snapshot-sessions-not-supported-client-error.json @@ -0,0 +1,128 @@ +{ + "description": "snapshot-sessions-not-supported-client-error", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "4.4.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Client error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Client error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Client error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-not-supported-server-error.json b/test/sessions/snapshot-sessions-not-supported-server-error.json new file mode 100644 index 0000000000..79213f314f --- /dev/null +++ b/test/sessions/snapshot-sessions-not-supported-server-error.json @@ -0,0 +1,187 @@ +{ + "description": "snapshot-sessions-not-supported-server-error", + "schemaVersion": "1.0", + 
"runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "distinct" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-unsupported-ops.json b/test/sessions/snapshot-sessions-unsupported-ops.json new file mode 100644 index 0000000000..1021b7f264 --- /dev/null +++ b/test/sessions/snapshot-sessions-unsupported-ops.json @@ -0,0 +1,493 @@ +{ + "description": "snapshot-sessions-unsupported-ops", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + 
"collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on insertOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 22, + "x": 22 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on insertMany with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 22, + "x": 22 + }, + { + "_id": 33, + "x": 33 + } + ] + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on deleteOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on updateOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on findOneAndUpdate with snapshot", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "collection0", + "readConcern": { + "level": "snapshot", + 
"atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listDatabases with snapshot", + "operations": [ + { + "name": "listDatabases", + "object": "client0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listCollections with snapshot", + "operations": [ + { + "name": "listCollections", + "object": "database0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listIndexes with snapshot", + "operations": [ + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on runCommand with snapshot", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "listCollections", + "command": { + "listCollections": 1 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions.json b/test/sessions/snapshot-sessions.json new file mode 100644 index 0000000000..75b577b039 --- /dev/null +++ b/test/sessions/snapshot-sessions.json @@ -0,0 +1,993 @@ +{ + "description": "snapshot-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "findAndModify", + "insert", + "update" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "session": { + "id": 
"session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + }, + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Find operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Distinct operation with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 12 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + 
"returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 13 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectResult": [ + 11, + 13 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate operation with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "countDocuments operation with snapshot", + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Mixed operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Write commands with snapshot session do not affect snapshot reads", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 22, + "x": 33 + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + 
"$inc": { + "x": 1 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "First snapshot read does not send atClusterTime", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + }, + "commandName": "find", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "StartTransaction fails in snapshot session", + "operations": [ + { + "name": "startTransaction", + "object": "session0", + "expectError": { + "isError": true, + "isClientError": true, + "errorContains": "Transactions are not supported in snapshot sessions" + } + } + ] + } + ] +} diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py new file mode 100644 index 0000000000..95a36ad7a2 --- /dev/null +++ b/test/sigstop_sigcont.py @@ -0,0 +1,94 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Used by test_client.TestClient.test_sigstop_sigcont.""" +from __future__ import annotations + +import logging +import os +import sys + +sys.path[0:0] = [""] + +from pymongo import monitoring +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi + +SERVER_API = None +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +if MONGODB_API_VERSION: + SERVER_API = ServerApi(MONGODB_API_VERSION) + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """Log events until the listener is closed.""" + + def __init__(self): + self.closed = False + + def close(self): + self.closed = True + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + if self.closed: + return + logging.warning("%s", event) + + +def main(uri: str) -> None: + heartbeat_logger = HeartbeatLogger() + client = MongoClient( + uri, + event_listeners=[heartbeat_logger], + heartbeatFrequencyMS=500, + connectTimeoutMS=500, + server_api=SERVER_API, + ) + client.admin.command("ping") + logging.info("TEST STARTED") + # test_sigstop_sigcont will SIGSTOP and SIGCONT this process in this loop. + while True: + try: + data = input('Type "q" to quit: ') + except EOFError: + break + if data == "q": + break + client.admin.command("ping") + logging.info("TEST COMPLETED") + heartbeat_logger.close() + client.close() + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("unknown or missing options") + print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") + sys.exit(1) + + # Enable logs in this format: + # 2022-03-30 12:40:55,582 INFO + FORMAT = "%(asctime)s %(levelname)s %(message)s" + logging.basicConfig(format=FORMAT, level=logging.INFO) + main(sys.argv[1]) diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json new file mode 100644 index 0000000000..3f500acdc6 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "ssl": true, + "directConnection": false + } +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-no-results.json b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json new file mode 100644 index 0000000000..7f49416aa3 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test4.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because no SRV records are present for this URI." 
+} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json new file mode 100644 index 0000000000..2133dee532 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?replicaSet=replset", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced=true is incompatible with replicaSet" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json new file mode 100644 index 0000000000..f425c06b30 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced is true but the SRV record resolves to multiple hosts" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json new file mode 100644 index 0000000000..f9719e760d --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -0,0 +1,13 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "ssl": true + } +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json new file mode 100644 index 0000000000..593a521c26 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true (TXT)" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json new file mode 100644 index 0000000000..d03a174b1e --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..a18360ea64 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json new file mode 100644 index 0000000000..bd85418117 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -0,0 +1,14 @@ +{ + 
"uri": "mongodb+srv://test23.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json new file mode 100644 index 0000000000..b5fcfd2c07 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some%2Cdb?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas.json b/test/srv_seedlist/replica-set/dbname-with-commas.json new file mode 100644 index 0000000000..c1e85f4b99 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some,db?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/direct-connection-false.json b/test/srv_seedlist/replica-set/direct-connection-false.json new file mode 100644 index 0000000000..1d57bdcb3c --- /dev/null +++ b/test/srv_seedlist/replica-set/direct-connection-false.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "directConnection": false + } +} diff --git a/test/srv_seedlist/replica-set/direct-connection-true.json b/test/srv_seedlist/replica-set/direct-connection-true.json new file mode 100644 index 0000000000..ace6700106 --- /dev/null +++ b/test/srv_seedlist/replica-set/direct-connection-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because directConnection=true is incompatible with SRV URIs." +} diff --git a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json new file mode 100644 index 0000000000..70c6c23a39 --- /dev/null +++ b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json @@ -0,0 +1,21 @@ +{ + "uri": "mongodb+srv://b*b%40f3tt%3D:%244to%40L8%3DMC@test3.test.build.10gen.cc/mydb%3F?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "user": "b*b@f3tt=", + "password": "$4to@L8=MC", + "db": "mydb?" 
+ }, + "comment": "Encoded user, pass, and DB parse correctly" +} diff --git a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json new file mode 100644 index 0000000000..fd2e565c7b --- /dev/null +++ b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test21.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "loadBalanced": false, + "ssl": true + } +} diff --git a/test/srv_seedlist/longer-parent-in-return.json b/test/srv_seedlist/replica-set/longer-parent-in-return.json similarity index 100% rename from test/srv_seedlist/longer-parent-in-return.json rename to test/srv_seedlist/replica-set/longer-parent-in-return.json diff --git a/test/srv_seedlist/misformatted-option.json b/test/srv_seedlist/replica-set/misformatted-option.json similarity index 100% rename from test/srv_seedlist/misformatted-option.json rename to test/srv_seedlist/replica-set/misformatted-option.json diff --git a/test/srv_seedlist/no-results.json b/test/srv_seedlist/replica-set/no-results.json similarity index 100% rename from test/srv_seedlist/no-results.json rename to test/srv_seedlist/replica-set/no-results.json diff --git a/test/srv_seedlist/not-enough-parts.json b/test/srv_seedlist/replica-set/not-enough-parts.json similarity index 100% rename from test/srv_seedlist/not-enough-parts.json rename to test/srv_seedlist/replica-set/not-enough-parts.json diff --git a/test/srv_seedlist/one-result-default-port.json b/test/srv_seedlist/replica-set/one-result-default-port.json similarity index 100% rename from test/srv_seedlist/one-result-default-port.json rename to test/srv_seedlist/replica-set/one-result-default-port.json diff --git a/test/srv_seedlist/one-txt-record-multiple-strings.json b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json similarity index 100% rename from test/srv_seedlist/one-txt-record-multiple-strings.json rename to test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json diff --git a/test/srv_seedlist/one-txt-record.json b/test/srv_seedlist/replica-set/one-txt-record.json similarity index 100% rename from test/srv_seedlist/one-txt-record.json rename to test/srv_seedlist/replica-set/one-txt-record.json diff --git a/test/srv_seedlist/parent-part-mismatch1.json b/test/srv_seedlist/replica-set/parent-part-mismatch1.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch1.json rename to test/srv_seedlist/replica-set/parent-part-mismatch1.json diff --git a/test/srv_seedlist/parent-part-mismatch2.json b/test/srv_seedlist/replica-set/parent-part-mismatch2.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch2.json rename to test/srv_seedlist/replica-set/parent-part-mismatch2.json diff --git a/test/srv_seedlist/parent-part-mismatch3.json b/test/srv_seedlist/replica-set/parent-part-mismatch3.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch3.json rename to test/srv_seedlist/replica-set/parent-part-mismatch3.json diff --git a/test/srv_seedlist/parent-part-mismatch4.json b/test/srv_seedlist/replica-set/parent-part-mismatch4.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch4.json rename to test/srv_seedlist/replica-set/parent-part-mismatch4.json diff --git a/test/srv_seedlist/parent-part-mismatch5.json 
b/test/srv_seedlist/replica-set/parent-part-mismatch5.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch5.json rename to test/srv_seedlist/replica-set/parent-part-mismatch5.json diff --git a/test/srv_seedlist/returned-parent-too-short.json b/test/srv_seedlist/replica-set/returned-parent-too-short.json similarity index 100% rename from test/srv_seedlist/returned-parent-too-short.json rename to test/srv_seedlist/replica-set/returned-parent-too-short.json diff --git a/test/srv_seedlist/returned-parent-wrong.json b/test/srv_seedlist/replica-set/returned-parent-wrong.json similarity index 100% rename from test/srv_seedlist/returned-parent-wrong.json rename to test/srv_seedlist/replica-set/returned-parent-wrong.json diff --git a/test/srv_seedlist/replica-set/srv-service-name.json b/test/srv_seedlist/replica-set/srv-service-name.json new file mode 100644 index 0000000000..ec36cdbb00 --- /dev/null +++ b/test/srv_seedlist/replica-set/srv-service-name.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "srvServiceName": "customname" + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json new file mode 100644 index 0000000000..6de1e37fa5 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option (TXT)" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json new file mode 100644 index 0000000000..f968757502 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..d9765ac663 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..494bb87687 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + 
"localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..66a5e90dad --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,13 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 1, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..241a901c64 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "authSource": "thisDB", + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json new file mode 100644 index 0000000000..c68610a201 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/two-results-default-port.json b/test/srv_seedlist/replica-set/two-results-default-port.json similarity index 100% rename from test/srv_seedlist/two-results-default-port.json rename to test/srv_seedlist/replica-set/two-results-default-port.json diff --git a/test/srv_seedlist/two-results-nonstandard-port.json b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json similarity index 100% rename from test/srv_seedlist/two-results-nonstandard-port.json rename to test/srv_seedlist/replica-set/two-results-nonstandard-port.json diff --git a/test/srv_seedlist/two-txt-records.json b/test/srv_seedlist/replica-set/two-txt-records.json similarity index 100% rename from test/srv_seedlist/two-txt-records.json rename to test/srv_seedlist/replica-set/two-txt-records.json diff --git a/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json b/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json new file mode 100644 index 0000000000..2a5cf2f007 --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because socketTimeoutMS is not an allowed option." 
+} diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json new file mode 100644 index 0000000000..0ebc737bd5 --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?ssl=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "authSource": "thisDB", + "ssl": false + } +} diff --git a/test/srv_seedlist/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json similarity index 100% rename from test/srv_seedlist/txt-record-with-overridden-uri-option.json rename to test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json diff --git a/test/srv_seedlist/txt-record-with-unallowed-option.json b/test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json similarity index 100% rename from test/srv_seedlist/txt-record-with-unallowed-option.json rename to test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json diff --git a/test/srv_seedlist/replica-set/uri-with-admin-database.json b/test/srv_seedlist/replica-set/uri-with-admin-database.json new file mode 100644 index 0000000000..32710d75f7 --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-admin-database.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/adminDB?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "auth_database": "adminDB" + } +} diff --git a/test/srv_seedlist/replica-set/uri-with-auth.json b/test/srv_seedlist/replica-set/uri-with-auth.json new file mode 100644 index 0000000000..cc7257d85b --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-auth.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "parsed_options": { + "user": "auser", + "password": "apass" + }, + "comment": "Should preserve auth credentials" +} diff --git a/test/srv_seedlist/uri-with-port.json b/test/srv_seedlist/replica-set/uri-with-port.json similarity index 100% rename from test/srv_seedlist/uri-with-port.json rename to test/srv_seedlist/replica-set/uri-with-port.json diff --git a/test/srv_seedlist/uri-with-two-hosts.json b/test/srv_seedlist/replica-set/uri-with-two-hosts.json similarity index 100% rename from test/srv_seedlist/uri-with-two-hosts.json rename to test/srv_seedlist/replica-set/uri-with-two-hosts.json diff --git a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..46390726f0 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + 
"localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..e02d72bf28 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..fdcc1692c0 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,9 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "numHosts": 1, + "options": { + "srvMaxHosts": 1, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-zero.json b/test/srv_seedlist/sharded/srvMaxHosts-zero.json new file mode 100644 index 0000000000..10ab9e656d --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-zero.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/test_auth.py b/test/test_auth.py index 8e41e100fd..2240a4b5b9 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -13,155 +13,134 @@ # limitations under the License. """Authentication Tests.""" +from __future__ import annotations import os import sys import threading - -try: - from urllib.parse import quote_plus -except ImportError: - # Python 2 - from urllib import quote_plus +from urllib.parse import quote_plus sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + AllowListEventListener, + delay, + ignore_deprecations, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + single_client_noauth, +) + from pymongo import MongoClient, monitoring from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple from pymongo.errors import OperationFailure +from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP -from test import client_context, SkipTest, unittest, Version -from test.utils import (delay, - ignore_deprecations, - single_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client_noauth, - WhiteListEventListener) # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. 
-GSSAPI_HOST = os.environ.get('GSSAPI_HOST') -GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017')) -GSSAPI_PRINCIPAL = os.environ.get('GSSAPI_PRINCIPAL') -GSSAPI_SERVICE_NAME = os.environ.get('GSSAPI_SERVICE_NAME', 'mongodb') -GSSAPI_CANONICALIZE = os.environ.get('GSSAPI_CANONICALIZE', 'false') -GSSAPI_SERVICE_REALM = os.environ.get('GSSAPI_SERVICE_REALM') -GSSAPI_PASS = os.environ.get('GSSAPI_PASS') -GSSAPI_DB = os.environ.get('GSSAPI_DB', 'test') - -SASL_HOST = os.environ.get('SASL_HOST') -SASL_PORT = int(os.environ.get('SASL_PORT', '27017')) -SASL_USER = os.environ.get('SASL_USER') -SASL_PASS = os.environ.get('SASL_PASS') -SASL_DB = os.environ.get('SASL_DB', '$external') +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") class AutoAuthenticateThread(threading.Thread): """Used in testing threaded authentication. This does collection.find_one() with a 1-second delay to ensure it must - check out and authenticate multiple sockets from the pool concurrently. + check out and authenticate multiple connections from the pool concurrently. :Parameters: `collection`: An auth-protected collection containing one document. """ def __init__(self, collection): - super(AutoAuthenticateThread, self).__init__() + super().__init__() self.collection = collection self.success = False def run(self): - assert self.collection.find_one({'$where': delay(1)}) is not None - self.success = True - - -class DBAuthenticateThread(threading.Thread): - """Used in testing threaded authentication. - - This does db.test.find_one() with a 1-second delay to ensure it must - check out and authenticate multiple sockets from the pool concurrently. - - :Parameters: - `db`: An auth-protected db with a 'test' collection containing one - document. 
- """ - - def __init__(self, db, username, password): - super(DBAuthenticateThread, self).__init__() - self.db = db - self.username = username - self.password = password - self.success = False - - def run(self): - self.db.authenticate(self.username, self.password) - assert self.db.test.find_one({'$where': delay(1)}) is not None + assert self.collection.find_one({"$where": delay(1)}) is not None self.success = True - class TestGSSAPI(unittest.TestCase): + mech_properties: str + service_realm_required: bool @classmethod def setUpClass(cls): if not HAVE_KERBEROS: - raise SkipTest('Kerberos module not available.') + raise SkipTest("Kerberos module not available.") if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: - raise SkipTest( - 'Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI') + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") cls.service_realm_required = ( - GSSAPI_SERVICE_REALM is not None and - GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL) - mech_properties = 'SERVICE_NAME:%s' % (GSSAPI_SERVICE_NAME,) - mech_properties += ( - ',CANONICALIZE_HOST_NAME:%s' % (GSSAPI_CANONICALIZE,)) + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" + mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" if GSSAPI_SERVICE_REALM is not None: - mech_properties += ',SERVICE_REALM:%s' % (GSSAPI_SERVICE_REALM,) + mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" cls.mech_properties = mech_properties def test_credentials_hashing(self): # GSSAPI credentials are properly hashed. - creds0 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', {}, None) + creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) creds1 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds2 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds3 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'B'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) - self.assertEqual(1, len(set([creds1, creds2]))) - self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) @ignore_deprecations def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_PASS, - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) else: - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) if not self.service_realm_required: # Without authMechanismProperties. 
- client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI') + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) client[GSSAPI_DB].collection.find_one() @@ -170,60 +149,68 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + '&authMechanismProperties=%s' % (self.mech_properties,) + mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - uri = uri + '&replicaSet=%s' % (str(set_name),) + uri = uri + f"&replicaSet={set_name!s}" client = MongoClient(uri) client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + '&replicaSet=%s' % (str(set_name),) + mech_uri = mech_uri + f"&replicaSet={set_name!s}" client = MongoClient(mech_uri) client[GSSAPI_DB].list_collection_names() @ignore_deprecations def test_gssapi_threaded(self): - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) # Authentication succeeded? client.server_info() @@ -231,13 +218,13 @@ def test_gssapi_threaded(self): # Need one document in the collection. AutoAuthenticateThread does # collection.find_one with a 1-second delay, forcing it to check out - # multiple sockets from the pool concurrently, proving that + # multiple connections from the pool concurrently, proving that # auto-authentication works with GSSAPI. 
collection = db.test if not collection.count_documents({}): try: collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) except OperationFailure: raise SkipTest("User must be able to write.") @@ -250,15 +237,17 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) # Succeeded? client.server_info() @@ -274,314 +263,308 @@ def test_gssapi_threaded(self): class TestSASLPlain(unittest.TestCase): - @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest('Must set SASL_HOST, ' - 'SASL_USER, and SASL_PASS to test SASL') + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - client = MongoClient(SASL_HOST, - SASL_PORT, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, SASL_DB)) + assert SASL_USER is not None + assert SASL_PASS is not None + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(SASL_HOST, - SASL_PORT, - replicaSet=set_name, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, - SASL_DB, str(set_name))) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) client = MongoClient(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): - - with ignore_deprecations(): - client = MongoClient(SASL_HOST, SASL_PORT) - - # Bad username - self.assertRaises(OperationFailure, client.ldap.authenticate, - 'not-user', SASL_PASS, SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert_one, - {"failed": True}) - - # Bad password - self.assertRaises(OperationFailure, client.ldap.authenticate, - SASL_USER, 'not-pwd', SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, 
client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert_one, - {"failed": True}) - def auth_string(user, password): - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(user), - quote_plus(password), - SASL_HOST, SASL_PORT, SASL_DB)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) return uri - bad_user = MongoClient(auth_string('not-user', SASL_PASS)) - bad_pwd = MongoClient(auth_string(SASL_USER, 'not-pwd')) + bad_user = MongoClient(auth_string("not-user", SASL_PASS)) + bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, 'ismaster') - self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ismaster') + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") -class TestSCRAMSHA1(unittest.TestCase): - +class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth - @client_context.require_version_min(2, 7, 2) def setUp(self): - # Before 2.7.7, SCRAM-SHA-1 had to be enabled from the command line. - if client_context.version < Version(2, 7, 7): - cmd_line = client_context.cmd_line - if 'SCRAM-SHA-1' not in cmd_line.get( - 'parsed', {}).get('setParameter', - {}).get('authenticationMechanisms', ''): - raise SkipTest('SCRAM-SHA-1 mechanism not enabled') - - client_context.create_user( - 'pymongo_test', 'user', 'pass', roles=['userAdmin', 'readWrite']) + super().setUp() + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') + client_context.drop_user("pymongo_test", "user") + super().tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port - with ignore_deprecations(): - client = rs_or_single_client_noauth() - self.assertTrue(client.pymongo_test.authenticate( - 'user', 'pass', mechanism='SCRAM-SHA-1')) - client.pymongo_test.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - % (host, port)) - client.pymongo_test.command('dbstats') + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + client.pymongo_test.command("dbstats") if client_context.is_rs: - uri = ('mongodb://user:pass' - '@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - '&replicaSet=%s' % (host, port, - client_context.replica_set_name)) + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - client.pymongo_test.command('dbstats') - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') - + client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") -class TestSCRAM(unittest.TestCase): +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation +class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): + super().setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS - monitoring._SENSITIVE_COMMANDS = set([]) - 
self.listener = WhiteListEventListener("saslStart") + monitoring._SENSITIVE_COMMANDS = set() + self.listener = AllowListEventListener("saslStart") def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") + super().tearDown() - @ignore_deprecations - def test_scram(self): - host, port = client_context.host, client_context.port - + def test_scram_skip_empty_exchange(self): + listener = AllowListEventListener("saslStart", "saslContinue") client_context.create_user( - 'testscram', - 'sha1', - 'pwd', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1']) + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) - client_context.create_user( - 'testscram', - 'sha256', - 'pwd', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + client = rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + client.testscram.command("dbstats") + + if client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.started_events[0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) + + # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. + started = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ["saslContinue"]) + else: + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + def test_scram(self): + # Step 1: create users client_context.create_user( - 'testscram', - 'both', - 'pwd', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1', 'SCRAM-SHA-256']) + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) + client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + client_context.create_user( + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) + + # Step 2: verify auth success cases + client = rs_or_single_client_noauth(username="sha1", password="pwd", authSource="testscram") + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + self.listener.reset() client = rs_or_single_client_noauth( - event_listeners=[self.listener]) - self.assertTrue( - client.testscram.authenticate('sha1', 'pwd')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'sha1', 'pwd', 
mechanism='SCRAM-SHA-1')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'sha1', 'pwd', mechanism='SCRAM-SHA-256') - - self.assertTrue( - client.testscram.authenticate('sha256', 'pwd')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'sha256', 'pwd', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'sha256', 'pwd', mechanism='SCRAM-SHA-1') - - self.listener.results.clear() - self.assertTrue( - client.testscram.authenticate('both', 'pwd')) - started = self.listener.results['started'][0] - self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'both', 'pwd', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'both', 'pwd', mechanism='SCRAM-SHA-1')) - client.testscram.command('dbstats') - client.testscram.logout() - - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'not-a-user', 'pwd') - - if HAVE_STRINGPREP: - # Test the use of SASLprep on passwords. For example, - # saslprep(u'\u2136') becomes u'IV' and saslprep(u'I\u00ADX') - # becomes u'IX'. SASLprep is only supported when the standard - # library provides stringprep. - client_context.create_user( - 'testscram', - u'\u2168', - u'\u2163', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) - - client_context.create_user( - 'testscram', - u'IX', - u'IX', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) - - self.assertTrue( - client.testscram.authenticate(u'\u2168', u'\u2163')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - u'\u2168', u'\u2163', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate(u'\u2168', u'IV')) - client.testscram.command('dbstats') - client.testscram.logout() - - self.assertTrue( - client.testscram.authenticate(u'IX', u'I\u00ADX')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - u'IX', u'I\u00ADX', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate(u'IX', u'IX')) - client.testscram.command('dbstats') - client.testscram.logout() - - client = rs_or_single_client_noauth( - u'mongodb://\u2168:\u2163@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - u'mongodb://\u2168:IV@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - - client = rs_or_single_client_noauth( - u'mongodb://IX:I\u00ADX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - u'mongodb://IX:IX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - - self.listener.results.clear() + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + client.testscram.command("dbstats") + if client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. 
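+            # (On 4.4+ the driver embeds the first SASL round in the
+            # connection handshake's speculativeAuthenticate field, so the
+            # allow-listed listener records no separate saslStart command.)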
+ + self.assertEqual(self.listener.started_events, []) + else: + started = self.listener.started_events[0] + self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") + + # Step 3: verify auth failure conditions + client = rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") + client = rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") + client = rs_or_single_client_noauth( + username="not-a-user", password="pwd", authSource="testscram" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") if client_context.is_rs: - uri = ('mongodb://both:pwd@%s:%d/testscram' - '?replicaSet=%s' % (host, port, - client_context.replica_set_name)) + host, port = client_context.host, client_context.port + uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - client.testscram.command('dbstats') - db = client.get_database( - 'testscram', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') + client.testscram.command("dbstats") + db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") + + @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep") + def test_scram_saslprep(self): + # Step 4: test SASLprep + host, port = client_context.host, client_context.port + # Test the use of SASLprep on passwords. For example, + # saslprep('\u2163') becomes 'IV' and saslprep('I\u00ADX') + # becomes 'IX'. SASLprep is only supported when the standard + # library provides stringprep.
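+        # An illustration of those mappings (hypothetical interactive
+        # session; saslprep is the helper from pymongo.saslprep):
+        #
+        #     >>> from pymongo.saslprep import saslprep
+        #     >>> saslprep("\u2163")
+        #     'IV'
+        #     >>> saslprep("I\u00ADX")
+        #     'IX'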
+ client_context.create_user( + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + client_context.create_user( + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = rs_or_single_client_noauth( + username="\u2168", password="\u2163", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="\u2168", password="IV", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="IX", password="I\u00ADX", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth( + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://\u2168:IV@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") + + client = rs_or_single_client_noauth("mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") def test_cache(self): client = single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) # Force authentication. 
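+        # (Authenticating populates the cache: the derived client key, server
+        # key, salt, and iteration count are stored so later connections can
+        # skip the expensive PBKDF2 computation.)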
- client.admin.command('ismaster') - all_credentials = client._MongoClient__all_credentials - credentials = all_credentials.get('admin') + client.admin.command("ping") cache = credentials.cache self.assertIsNotNone(cache) data = cache.data @@ -593,27 +576,15 @@ def test_cache(self): self.assertIsInstance(salt, bytes) self.assertIsInstance(iterations, int) - pool = next(iter(client._topology._servers.values()))._pool - with pool.get_socket(all_credentials) as sock_info: - authset = sock_info.authset - cached = set(all_credentials.values()) - self.assertEqual(len(cached), 1) - self.assertFalse(authset - cached) - self.assertFalse(cached - authset) - - sock_credentials = next(iter(authset)) - sock_cache = sock_credentials.cache - self.assertIsNotNone(sock_cache) - self.assertEqual(sock_cache.data, data) - def test_scram_threaded(self): - coll = client_context.client.db.test coll.drop() - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - coll = rs_or_single_client().db.test + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.db.test threads = [] for _ in range(4): threads.append(AutoAuthenticateThread(coll)) @@ -623,105 +594,70 @@ def test_scram_threaded(self): thread.join() self.assertTrue(thread.success) -class TestThreadedAuth(unittest.TestCase): - - @client_context.require_auth - def test_db_authenticate_threaded(self): - - db = client_context.client.db - coll = db.test - coll.drop() - coll.insert_one({'_id': 1}) - - client_context.create_user( - 'db', - 'user', - 'pass', - roles=['dbOwner']) - self.addCleanup(db.command, 'dropUser', 'user') - - db = rs_or_single_client_noauth().db - db.authenticate('user', 'pass') - # No error. - db.authenticate('user', 'pass') - - db = rs_or_single_client_noauth().db - threads = [] - for _ in range(4): - threads.append(DBAuthenticateThread(db, 'user', 'pass')) - for thread in threads: - thread.start() - for thread in threads: - thread.join() - self.assertTrue(thread.success) - - -class TestAuthURIOptions(unittest.TestCase): +class TestAuthURIOptions(IntegrationTest): @client_context.require_auth def setUp(self): - client_context.create_user('admin', 'admin', 'pass') - client_context.create_user( - 'pymongo_test', 'user', 'pass', ['userAdmin', 'readWrite']) - self.client = rs_or_single_client_noauth( - username='admin', password='pass') + super().setUp() + client_context.create_user("admin", "admin", "pass") + client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') - client_context.drop_user('admin', 'admin') + client_context.drop_user("pymongo_test", "user") + client_context.drop_user("admin", "admin") + super().tearDown() def test_uri_options(self): # Test default to admin host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth( - 'mongodb://admin:pass@%s:%d' % (host, port)) - self.assertTrue(client.admin.command('dbstats')) + client = rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + self.assertTrue(client.admin.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://admin:pass@%s:%d/?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertTrue(client.admin.command('dbstats')) - db = client.get_database( - 'admin', 
read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertTrue(client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test explicit database - uri = 'mongodb://user:pass@%s:%d/pymongo_test' % (host, port) + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test authSource - uri = ('mongodb://user:pass@%s:%d' - '/pymongo_test2?authSource=pymongo_test' % (host, port)) + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=' - '%s;authSource=pymongo_test' % ( - host, port, client_context.replica_set_name)) + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) if __name__ == "__main__": diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 947bdbb987..4976a6dd49 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Run the auth spec tests.""" +from __future__ import annotations import glob import json @@ -21,12 +22,12 @@ sys.path[0:0] = [""] -from pymongo import MongoClient from test import unittest +from test.unified_format import generate_test_classes +from pymongo import MongoClient -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'auth') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") class TestAuthSpec(unittest.TestCase): @@ -34,63 +35,88 @@ class TestAuthSpec(unittest.TestCase): def create_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - auth = test_case['auth'] - options = test_case['options'] + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) else: - client = MongoClient(uri, connect=False) - credentials = client._MongoClient__options.credentials - if auth is not None: - self.assertEqual(credentials.username, auth['username']) - self.assertEqual(credentials.password, auth['password']) - self.assertEqual(credentials.source, auth['db']) - if options is not None: - if 'authmechanism' in options: - self.assertEqual( - credentials.mechanism, options['authmechanism']) + props = {} + if credential: + props = credential["mechanism_properties"] or {} + if props.get("REQUEST_TOKEN_CALLBACK"): + props["request_token_callback"] = lambda x, y: 1 + del props["REQUEST_TOKEN_CALLBACK"] + client = MongoClient(uri, connect=False, authmechanismproperties=props) + credentials = client.options.pool_options._credentials + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) else: - self.assertEqual(credentials.mechanism, 'DEFAULT') - if 'authmechanismproperties' in options: - expected = options['authmechanismproperties'] + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] + if expected is not None: actual = credentials.mechanism_properties - if 'SERVICE_NAME' in expected: - self.assertEqual( - actual.service_name, expected['SERVICE_NAME']) - if 'CANONICALIZE_HOST_NAME' in expected: - self.assertEqual( - actual.canonicalize_host_name, - expected['CANONICALIZE_HOST_NAME']) - if 'SERVICE_REALM' in expected: - self.assertEqual( - actual.service_realm, expected['SERVICE_REALM']) + for key, _val in expected.items(): + if "SERVICE_NAME" in expected: + self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) + elif "CANONICALIZE_HOST_NAME" in expected: + self.assertEqual( + actual.canonicalize_host_name, expected["CANONICALIZE_HOST_NAME"] + ) + elif "SERVICE_REALM" in expected: + self.assertEqual(actual.service_realm, expected["SERVICE_REALM"]) + elif "AWS_SESSION_TOKEN" in expected: + self.assertEqual( + actual.aws_session_token, expected["AWS_SESSION_TOKEN"] + ) + elif "PROVIDER_NAME" in expected: + self.assertEqual(actual.provider_name, expected["PROVIDER_NAME"]) + elif "request_token_callback" in expected: + self.assertEqual( + actual.request_token_callback, expected["request_token_callback"] + ) + else: + self.fail(f"Unhandled property: {key}") + else: + if credential["mechanism"] == 
"MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) return run_test def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: - test_cases = json.load(auth_tests)['tests'] + test_cases = json.load(auth_tests)["tests"] for test_case in test_cases: - if test_case.get('optional', False): + if test_case.get("optional", False): continue test_method = create_test(test_case) - name = str(test_case['description'].lower().replace(' ', '_')) - setattr( - TestAuthSpec, - 'test_%s_%s' % (test_suffix, name), - test_method) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) create_tests() +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + if __name__ == "__main__": unittest.main() diff --git a/test/test_binary.py b/test/test_binary.py index 392cd97c84..fafb6da162 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -13,66 +13,71 @@ # limitations under the License. """Tests for the Binary wrapper.""" +from __future__ import annotations import array import base64 import copy +import mmap import pickle -import platform import sys import uuid sys.path[0:0] = [""] -import bson +from test import IntegrationTest, client_context, unittest +import bson from bson import decode, encode from bson.binary import * from bson.codec_options import CodecOptions -from bson.py3compat import PY3 from bson.son import SON +from pymongo.common import validate_uuid_representation from pymongo.mongo_client import MongoClient -from test import client_context, unittest -from test.utils import ignore_deprecations +from pymongo.write_concern import WriteConcern class TestBinary(unittest.TestCase): + csharp_data: bytes + java_data: bytes @classmethod def setUpClass(cls): # Generated by the Java driver from_java = ( - b'bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu' - b'Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND' - b'ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+' - b'XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1' - b'aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR' - b'jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA' - b'AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z' - b'DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf' - b'aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx' - b'29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My' - b'1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB' - b'W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp' - b'bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc' - b'0MQAA') + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + 
b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) cls.java_data = base64.b64decode(from_java) # Generated by the .net driver from_csharp = ( - b'ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl' - b'iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2' - b'ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V' - b'pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl' - b'AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A' - b'ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z' - b'oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU' - b'zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn' - b'dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA' - b'CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT' - b'QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP' - b'MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00' - b'ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=') + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) cls.csharp_data = base64.b64decode(from_csharp) def test_binary(self): @@ -93,10 +98,7 @@ def test_exceptions(self): self.assertRaises(ValueError, Binary, b"hello", 256) self.assertTrue(Binary(b"hello", 0)) self.assertTrue(Binary(b"hello", 255)) - if platform.python_implementation() != "Jython": - # Jython's memoryview accepts unicode strings... 
- # https://bugs.jython.org/issue2784 - self.assertRaises(TypeError, Binary, u"hello") + self.assertRaises(TypeError, Binary, "hello") def test_subtype(self): one = Binary(b"hello") @@ -121,20 +123,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), - "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary({}, 0)".format(repr(b"hello world"))) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), - "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary({}, 2)".format(repr(b"hello world"))) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), - "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary({}, 0)".format(repr(b"\x08\xFF"))) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), - "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary({}, 2)".format(repr(b"\x08\xFF"))) five = Binary(b"test", 100) - self.assertEqual(repr(five), - "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary({}, 100)".format(repr(b"test"))) def test_hash(self): one = Binary(b"hello world") @@ -144,202 +141,193 @@ def test_hash(self): self.assertEqual(hash(Binary(b"hello world", 42)), hash(two)) def test_uuid_subtype_4(self): - """uuid_representation should be ignored when decoding subtype 4.""" + """Only STANDARD should decode subtype 4 as native uuid.""" expected_uuid = uuid.uuid4() - doc = {"uuid": Binary(expected_uuid.bytes, 4)} + expected_bin = Binary(expected_uuid.bytes, 4) + doc = {"uuid": expected_bin} encoded = encode(doc) - for uuid_representation in ALL_UUID_REPRESENTATIONS: - options = CodecOptions(uuid_representation=uuid_representation) - self.assertEqual(expected_uuid, decode(encoded, options)["uuid"]) + for uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + ): + opts = CodecOptions(uuid_representation=uuid_rep) + self.assertEqual(expected_bin, decode(encoded, opts)["uuid"]) + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + self.assertEqual(expected_uuid, decode(encoded, opts)["uuid"]) def test_legacy_java_uuid(self): # Test decoding data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b''.join([ - encode(doc, False, 
CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection def test_legacy_java_uuid_roundtrip(self): data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) - client_context.client.pymongo_test.drop_collection('java_uuid') + client_context.client.pymongo_test.drop_collection("java_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=JAVA_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('java_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("java_uuid") def test_legacy_csharp_uuid(self): data = self.csharp_data # Test decoding - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = 
b''.join([ - encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection def test_legacy_csharp_uuid_roundtrip(self): data = self.csharp_data - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + client_context.client.pymongo_test.drop_collection("csharp_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=CSHARP_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") def test_uri_to_uuid(self): uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" client = MongoClient(uri, connect=False) - self.assertEqual( - client.pymongo_test.test.codec_options.uuid_representation, - CSHARP_LEGACY) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) @client_context.require_connection - @ignore_deprecations def test_uuid_queries(self): - db = client_context.client.pymongo_test coll = db.test coll.drop() uu = uuid.uuid4() - coll.insert_one({'uuid': Binary(uu.bytes, 3)}) + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) self.assertEqual(1, coll.count_documents({})) - # Test UUIDLegacy queries. - coll = db.get_collection("test", - CodecOptions(uuid_representation=STANDARD)) - self.assertEqual(0, coll.find({'uuid': uu}).count()) - cur = coll.find({'uuid': UUIDLegacy(uu)}) - self.assertEqual(1, cur.count()) - retrieved = next(cur) - self.assertEqual(uu, retrieved['uuid']) - # Test regular UUID queries (using subtype 4). 
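# --- Editor's aside (illustrative sketch, not part of the patch): the
# modern replacement for the removed UUIDLegacy queries, mirroring the
# rewritten test below. Binary.from_uuid builds the exact bytes/subtype a
# legacy writer would have stored, so equality filters still match old
# documents.
import uuid
from bson.binary import Binary, UuidRepresentation

uu = uuid.uuid4()
legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY)  # subtype 3
standard = Binary.from_uuid(uu, UuidRepresentation.STANDARD)  # subtype 4
assert (legacy.subtype, standard.subtype) == (3, 4)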
- coll.insert_one({'uuid': uu}) + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) self.assertEqual(2, coll.count_documents({})) - cur = coll.find({'uuid': uu}) - self.assertEqual(1, cur.count()) - retrieved = next(cur) - self.assertEqual(uu, retrieved['uuid']) + docs = list(coll.find({"uuid": uu})) + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) # Test both. - predicate = {'uuid': {'$in': [uu, UUIDLegacy(uu)]}} + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} self.assertEqual(2, coll.count_documents(predicate)) - cur = coll.find(predicate) - self.assertEqual(2, cur.count()) + docs = list(coll.find(predicate)) + self.assertEqual(2, len(docs)) coll.drop() def test_pickle(self): - b1 = Binary(b'123', 2) + b1 = Binary(b"123", 2) # For testing backwards compatibility with pre-2.4 pymongo - if PY3: - p = (b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" - b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" - b"\x05K\x02sb.") - else: - p = (b"ccopy_reg\n_reconstructor\np0\n(cbson.binary\nBinary\np1\nc" - b"__builtin__\nstr\np2\nS'123'\np3\ntp4\nRp5\n(dp6\nS'_Binary" - b"__subtype'\np7\nI2\nsb.") - - if not sys.version.startswith('3.0'): + p = ( + b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" + b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" + b"\x05K\x02sb." + ) + + if not sys.version.startswith("3.0"): self.assertEqual(b1, pickle.loads(p)) for proto in range(pickle.HIGHEST_PROTOCOL + 1): self.assertEqual(b1, pickle.loads(pickle.dumps(b1, proto))) uu = uuid.uuid4() - uul = UUIDLegacy(uu) + uul = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(uul, copy.copy(uul)) self.assertEqual(uul, copy.deepcopy(uul)) @@ -348,20 +336,216 @@ def test_pickle(self): self.assertEqual(uul, pickle.loads(pickle.dumps(uul, proto))) def test_buffer_protocol(self): - b0 = Binary(b'123', 2) - - self.assertEqual(b0, Binary(memoryview(b'123'), 2)) - self.assertEqual(b0, Binary(bytearray(b'123'), 2)) - # mmap.mmap and array.array only expose the - # buffer interface in python 3.x - if PY3: - # No mmap module in Jython - import mmap - with mmap.mmap(-1, len(b'123')) as mm: - mm.write(b'123') - mm.seek(0) - self.assertEqual(b0, Binary(mm, 2)) - self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) + b0 = Binary(b"123", 2) + + self.assertEqual(b0, Binary(memoryview(b"123"), 2)) + self.assertEqual(b0, Binary(bytearray(b"123"), 2)) + with mmap.mmap(-1, len(b"123")) as mm: + mm.write(b"123") + mm.seek(0) + self.assertEqual(b0, Binary(mm, 2)) + self.assertEqual(b0, Binary(array.array("B", b"123"), 2)) + + +class TestUuidSpecExplicitCoding(unittest.TestCase): + uuid: uuid.UUID + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") + + @staticmethod + def _hex_to_bytes(hexstring): + return bytes.fromhex(hexstring) + + # Explicit encoding prose test #1 + def test_encoding_1(self): + obj = Binary.from_uuid(self.uuid) + expected_obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + self.assertEqual(obj, expected_obj) + + def _test_encoding_w_uuid_rep(self, uuid_rep, expected_hexstring, expected_subtype): + obj = Binary.from_uuid(self.uuid, uuid_rep) + expected_obj = Binary(self._hex_to_bytes(expected_hexstring), expected_subtype) + self.assertEqual(obj, 
expected_obj) + + # Explicit encoding prose test #2 + def test_encoding_2(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.STANDARD, "00112233445566778899AABBCCDDEEFF", 4 + ) + + # Explicit encoding prose test #3 + def test_encoding_3(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.JAVA_LEGACY, "7766554433221100FFEEDDCCBBAA9988", 3 + ) + + # Explicit encoding prose test #4 + def test_encoding_4(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.CSHARP_LEGACY, "33221100554477668899AABBCCDDEEFF", 3 + ) + + # Explicit encoding prose test #5 + def test_encoding_5(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.PYTHON_LEGACY, "00112233445566778899AABBCCDDEEFF", 3 + ) + + # Explicit encoding prose test #6 + def test_encoding_6(self): + with self.assertRaises(ValueError): + Binary.from_uuid(self.uuid, UuidRepresentation.UNSPECIFIED) + + # Explicit decoding prose test #1 + def test_decoding_1(self): + obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + + # Case i: + self.assertEqual(obj.as_uuid(), self.uuid) + # Case ii: + self.assertEqual(obj.as_uuid(UuidRepresentation.STANDARD), self.uuid) + # Cases iii-vi: + for uuid_rep in ( + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.PYTHON_LEGACY, + ): + with self.assertRaises(ValueError): + obj.as_uuid(uuid_rep) + + def _test_decoding_legacy(self, hexstring, uuid_rep): + obj = Binary(self._hex_to_bytes(hexstring), 3) + + # Case i: + with self.assertRaises(ValueError): + obj.as_uuid() + # Cases ii-iii: + for rep in (UuidRepresentation.STANDARD, UuidRepresentation.UNSPECIFIED): + with self.assertRaises(ValueError): + obj.as_uuid(rep) + # Case iv: + self.assertEqual(obj.as_uuid(uuid_rep), self.uuid) + + # Explicit decoding prose test #2 + def test_decoding_2(self): + self._test_decoding_legacy( + "7766554433221100FFEEDDCCBBAA9988", UuidRepresentation.JAVA_LEGACY + ) + + # Explicit decoding prose test #3 + def test_decoding_3(self): + self._test_decoding_legacy( + "33221100554477668899AABBCCDDEEFF", UuidRepresentation.CSHARP_LEGACY + ) + + # Explicit decoding prose test #4 + def test_decoding_4(self): + self._test_decoding_legacy( + "00112233445566778899AABBCCDDEEFF", UuidRepresentation.PYTHON_LEGACY + ) + + +class TestUuidSpecImplicitCoding(IntegrationTest): + uuid: uuid.UUID + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") + + @staticmethod + def _hex_to_bytes(hexstring): + return bytes.fromhex(hexstring) + + def _get_coll_w_uuid_rep(self, uuid_rep): + codec_options = self.client.codec_options.with_options( + uuid_representation=validate_uuid_representation(None, uuid_rep) + ) + coll = self.db.get_collection( + "pymongo_test", codec_options=codec_options, write_concern=WriteConcern("majority") + ) + return coll + + def _test_encoding(self, uuid_rep, expected_hexstring, expected_subtype): + coll = self._get_coll_w_uuid_rep(uuid_rep) + coll.delete_many({}) + coll.insert_one({"_id": self.uuid}) + self.assertTrue( + coll.find_one({"_id": Binary(self._hex_to_bytes(expected_hexstring), expected_subtype)}) + ) + + # Implicit encoding prose test #1 + def test_encoding_1(self): + self._test_encoding("javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) + + # Implicit encoding prose test #2 + def test_encoding_2(self): + self._test_encoding("csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) + + # Implicit encoding prose test #3 + def test_encoding_3(self): + 
self._test_encoding("pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) + + # Implicit encoding prose test #4 + def test_encoding_4(self): + self._test_encoding("standard", "00112233445566778899AABBCCDDEEFF", 4) + + # Implicit encoding prose test #5 + def test_encoding_5(self): + with self.assertRaises(ValueError): + self._test_encoding("unspecifed", "dummy", -1) + + def _test_decoding( + self, + client_uuid_representation_string, + legacy_field_uuid_representation, + expected_standard_field_value, + expected_legacy_field_value, + ): + coll = self._get_coll_w_uuid_rep(client_uuid_representation_string) + coll.drop() + + standard_val = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + legacy_val = Binary.from_uuid(self.uuid, legacy_field_uuid_representation) + coll.insert_one({"standard": standard_val, "legacy": legacy_val}) + + doc = coll.find_one() + self.assertEqual(doc["standard"], expected_standard_field_value) + self.assertEqual(doc["legacy"], expected_legacy_field_value) + + # Implicit decoding prose test #1 + def test_decoding_1(self): + standard_binary = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + self._test_decoding( + "javaLegacy", UuidRepresentation.JAVA_LEGACY, standard_binary, self.uuid + ) + self._test_decoding( + "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, standard_binary, self.uuid + ) + self._test_decoding( + "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, standard_binary, self.uuid + ) + + # Implicit decoding pose test #2 + def test_decoding_2(self): + legacy_binary = Binary.from_uuid(self.uuid, UuidRepresentation.PYTHON_LEGACY) + self._test_decoding("standard", UuidRepresentation.PYTHON_LEGACY, self.uuid, legacy_binary) + + # Implicit decoding pose test #3 + def test_decoding_3(self): + expected_standard_value = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + for legacy_uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.JAVA_LEGACY, + ): + expected_legacy_value = Binary.from_uuid(self.uuid, legacy_uuid_rep) + self._test_decoding( + "unspecified", legacy_uuid_rep, expected_standard_value, expected_legacy_value + ) if __name__ == "__main__": diff --git a/test/test_bson.py b/test/test_bson.py index dd604c7389..749c63bdf3 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -15,49 +14,53 @@ # limitations under the License. 
"""Test the bson module.""" +from __future__ import annotations +import array import collections import datetime +import mmap import os +import pickle import re import sys import tempfile import uuid +from collections import OrderedDict, abc +from io import BytesIO sys.path[0:0] = [""] +from test import qcheck, unittest +from test.utils import ExceptionCatchingThread + import bson -from bson import (BSON, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - EPOCH_AWARE, - is_valid, - Regex) -from bson.binary import Binary, UUIDLegacy +from bson import ( + BSON, + EPOCH_AWARE, + DatetimeMS, + Regex, + _datetime_to_millis, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, + is_valid, +) +from bson.binary import Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversion +from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION +from bson.dbref import DBRef +from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 +from bson.max_key import MaxKey +from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.dbref import DBRef -from bson.py3compat import abc, iteritems, PY3, StringIO, text_type from bson.son import SON from bson.timestamp import Timestamp -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.tz_util import (FixedOffset, - utc) - -from test import qcheck, SkipTest, unittest -from test.utils import ExceptionCatchingThread - -if PY3: - long = int +from bson.tz_util import FixedOffset, utc class NotADict(abc.MutableMapping): @@ -94,7 +97,6 @@ def __repr__(self): class DSTAwareTimezone(datetime.tzinfo): - def __init__(self, offset, name, dst_start_month, dst_end_month): self.__offset = offset self.__dst_start_month = dst_start_month @@ -120,11 +122,9 @@ class TestBSON(unittest.TestCase): def assertInvalid(self, data): self.assertRaises(InvalidBSON, decode, data) - def check_encode_then_decode(self, doc_class=dict, decoder=decode, - encoder=encode): - + def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): # Work around http://bugs.jython.org/issue1728 - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): doc_class = SON def helper(doc): @@ -132,24 +132,22 @@ def helper(doc): self.assertEqual(doc, decoder(encoder(doc))) helper({}) - helper({"test": u"hello"}) - self.assertTrue(isinstance(decoder(encoder( - {"hello": "world"}))["hello"], text_type)) + helper({"test": "hello"}) + self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str)) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) - helper({u"hello": 0.0013109}) + helper({"hello": 0.0013109}) helper({"something": True}) helper({"false": False}) - helper({"an array": [1, True, 3.8, u"world"]}) - helper({"an object": doc_class({"test": u"something"})}) + helper({"an array": [1, True, 3.8, "world"]}) + helper({"an object": doc_class({"test": "something"})}) helper({"a binary": Binary(b"test", 100)}) helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) helper({"another binary": Binary(b"test", 2)}) - helper(SON([(u'test dst', datetime.datetime(1993, 4, 4, 2))])) - helper(SON([(u'test negative dst', - datetime.datetime(1, 1, 1, 1, 1, 1))])) + helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) + 
helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) helper({"ref": DBRef("coll", 5)}) helper({"ref": DBRef("coll", 5, foo="bar", bar=4)}) @@ -159,14 +157,12 @@ def helper(doc): helper({"foo": MinKey()}) helper({"foo": MaxKey()}) helper({"$field": Code("function(){ return true; }")}) - helper({"$field": Code("return function(){ return x; }", scope={'x': False})}) + helper({"$field": Code("return function(){ return x; }", scope={"x": False})}) def encode_then_decode(doc): - return doc_class(doc) == decoder(encode(doc), CodecOptions( - document_class=doc_class)) + return doc_class(doc) == decoder(encode(doc), CodecOptions(document_class=doc_class)) - qcheck.check_unittest(self, encode_then_decode, - qcheck.gen_mongo_dict(3)) + qcheck.check_unittest(self, encode_then_decode, qcheck.gen_mongo_dict(3)) def test_encode_then_decode(self): self.check_encode_then_decode() @@ -176,22 +172,24 @@ def test_encode_then_decode_any_mapping(self): def test_encode_then_decode_legacy(self): self.check_encode_then_decode( - encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + encoder=BSON.encode, decoder=lambda *args: BSON(args[0]).decode(*args[1:]) + ) def test_encode_then_decode_any_mapping_legacy(self): self.check_encode_then_decode( - doc_class=NotADict, encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + doc_class=NotADict, + encoder=BSON.encode, + decoder=lambda *args: BSON(args[0]).decode(*args[1:]), + ) def test_encoding_defaultdict(self): - dct = collections.defaultdict(dict, [('foo', 'bar')]) + dct = collections.defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] encode(dct) - self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, collections.defaultdict(dict, [("foo", "bar")])) def test_basic_validation(self): self.assertRaises(TypeError, is_valid, 100) - self.assertRaises(TypeError, is_valid, u"test") + self.assertRaises(TypeError, is_valid, "test") self.assertRaises(TypeError, is_valid, 10.4) self.assertInvalid(b"test") @@ -208,134 +206,140 @@ def test_basic_validation(self): self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" - b"\x04\x00\x00\x00bar\x00\x00") - self.assertInvalid(b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" - b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00") - self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c" - b"\x00\x00\x00\x08bar\x00\x01\x00\x00") - self.assertInvalid(b"\x1c\x00\x00\x00\x03foo\x00" - b"\x12\x00\x00\x00\x02bar\x00" - b"\x05\x00\x00\x00baz\x00\x00\x00") - self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" - b"\x04\x00\x00\x00abc\xff\x00") - - def test_bad_string_lengths(self): - self.assertInvalid( - b"\x0c\x00\x00\x00\x02\x00" - b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00") self.assertInvalid( - b"\x12\x00\x00\x00\x02\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\x0e\x00" - b"\x00\x00\x00\x00\x00\x00") + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" + ) + self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c\x00\x00\x00\x08bar\x00\x01\x00\x00") self.assertInvalid( - b"\x12\x00\x00\x00\x0e\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") + b"\x1c\x00\x00\x00\x03foo\x00" + 
b"\x12\x00\x00\x00\x02bar\x00" + b"\x05\x00\x00\x00baz\x00\x00\x00" + ) + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00\x04\x00\x00\x00abc\xff\x00") + + def test_bad_string_lengths(self): + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00\xff\xff\xff\xfffoobar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x0c\x00" - b"\x00\x00\x00\x00\x00RY\xb5j" - b"\xfa[\xd8A\xd6X]\x99\x00") + b"\x18\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) self.assertInvalid( b"\x1e\x00\x00\x00\x0c\x00" b"\xff\xff\xff\xfffoobar\x00" - b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\xff\xff\xff\xff\x00\x00") + b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\xff\xff\xff\xff\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x00\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\xff\xff" b"\xff\xff\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x00\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\xff\xff\xff" - b"\xff\x00\x00\x00") + b"\xff\x00\x00\x00" + ) def test_random_data_is_not_bson(self): - qcheck.check_unittest(self, qcheck.isnt(is_valid), - qcheck.gen_string(qcheck.gen_range(0, 40))) + qcheck.check_unittest( + self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40)) + ) def test_basic_decode(self): - self.assertEqual({"test": u"hello world"}, - decode(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" - b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" - b"\x72\x6C\x64\x00\x00")) - self.assertEqual([{"test": u"hello world"}, {}], - decode_all(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")) - self.assertEqual([{"test": u"hello world"}, {}], - list(decode_iter( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"))) - self.assertEqual([{"test": u"hello world"}, {}], - list(decode_file_iter(StringIO( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")))) + self.assertEqual( + {"test": "hello world"}, + decode( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" + b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" + b"\x72\x6C\x64\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + decode_all( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_iter( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" 
+ b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_file_iter( + BytesIO( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ) + ), + ) def test_decode_all_buffer_protocol(self): - docs = [{'foo': 'bar'}, {}] - bs = b"".join(map(encode, docs)) + docs = [{"foo": "bar"}, {}] + bs = b"".join(map(encode, docs)) # type: ignore[arg-type] self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) - self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) - if PY3: - import array - import mmap - self.assertEqual(docs, decode_all(array.array('B', bs))) - with mmap.mmap(-1, len(bs)) as mm: - mm.write(bs) - mm.seek(0) - self.assertEqual(docs, decode_all(mm)) + self.assertEqual(docs, decode_all(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(docs, decode_all(array.array("B", bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(docs, decode_all(mm)) def test_decode_buffer_protocol(self): - doc = {'foo': 'bar'} + doc = {"foo": "bar"} bs = encode(doc) self.assertEqual(doc, decode(bs)) self.assertEqual(doc, decode(bytearray(bs))) self.assertEqual(doc, decode(memoryview(bs))) - self.assertEqual(doc, decode(memoryview(b'1' + bs + b'1')[1:-1])) - if PY3: - import array - import mmap - self.assertEqual(doc, decode(array.array('B', bs))) - with mmap.mmap(-1, len(bs)) as mm: - mm.write(bs) - mm.seek(0) - self.assertEqual(doc, decode(mm)) + self.assertEqual(doc, decode(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(doc, decode(array.array("B", bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(doc, decode(mm)) def test_invalid_decodes(self): # Invalid object size (not enough bytes in document for even # an object size of first object. # NOTE: decode_all and decode_iter don't care, not sure if they should? - self.assertRaises(InvalidBSON, list, - decode_file_iter(StringIO(b"\x1B"))) + self.assertRaises(InvalidBSON, list, decode_file_iter(BytesIO(b"\x1B"))) bad_bsons = [ # An object size that's too small to even include the object size, @@ -343,40 +347,54 @@ def test_invalid_decodes(self): b"\x01\x00\x00\x00\x00", # One object, but with object size listed smaller than it is in the # data. - (b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"), + ( + b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), # One object, missing the EOO at the end. - (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00" + ), # One object, sized correctly, with a spot for an EOO, but the EOO # isn't 0x00. 
- (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\xFF"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\xFF" + ), ] for i, data in enumerate(bad_bsons): - msg = "bad_bson[{}]".format(i) + msg = f"bad_bson[{i}]" with self.assertRaises(InvalidBSON, msg=msg): decode_all(data) with self.assertRaises(InvalidBSON, msg=msg): list(decode_iter(data)) with self.assertRaises(InvalidBSON, msg=msg): - list(decode_file_iter(StringIO(data))) + list(decode_file_iter(BytesIO(data))) with tempfile.TemporaryFile() as scratch: scratch.write(data) scratch.seek(0, os.SEEK_SET) with self.assertRaises(InvalidBSON, msg=msg): list(decode_file_iter(scratch)) + def test_invalid_field_name(self): + # Decode a truncated field + with self.assertRaises(InvalidBSON) as ctx: + decode(b"\x0b\x00\x00\x00\x02field\x00") + # Assert that the InvalidBSON error message is not empty. + self.assertTrue(str(ctx.exception)) + def test_data_timestamp(self): - self.assertEqual({"test": Timestamp(4, 20)}, - decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" - b"\x00\x00\x00\x04\x00\x00\x00\x00")) + self.assertEqual( + {"test": Timestamp(4, 20)}, + decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00"), + ) def test_basic_encode(self): self.assertRaises(TypeError, encode, 100) @@ -386,83 +404,102 @@ def test_basic_encode(self): self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00")) self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00") - self.assertEqual(encode({"test": u"hello world"}), - b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" - b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" - b"\x64\x00\x00") - self.assertEqual(encode({u"mike": 100}), - b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" - b"\x00\x00\x00") - self.assertEqual(encode({"hello": 1.5}), - b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" - b"\x00\x00\x00\x00\x00\xF8\x3F\x00") - self.assertEqual(encode({"true": True}), - b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00") - self.assertEqual(encode({"false": False}), - b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" - b"\x00") - self.assertEqual(encode({"empty": []}), - b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" - b"\x00\x00\x00\x00\x00") - self.assertEqual(encode({"none": {}}), - b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" - b"\x00\x00\x00\x00") - self.assertEqual(encode({"test": Binary(b"test", 0)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 2)}), - b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" - b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 128)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x80\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": None}), - b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") - self.assertEqual(encode({"date": datetime.datetime(2007, 1, 8, - 0, 30, 11)}), - b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" - b"\x1C\xFF\x0F\x01\x00\x00\x00") - self.assertEqual(encode({"regex": re.compile(b"a*b", - re.IGNORECASE)}), - b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" - b"\x2A\x62\x00\x69\x00\x00") - self.assertEqual(encode({"$where": Code("test")}), - b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" - b"\x00\x00") - 
self.assertEqual(encode({"$field": - Code("function(){ return true;}", scope=None)}), - b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" - b"function(){ return true;}\x00\x00") - self.assertEqual(encode({"$field": - Code("return function(){ return x; }", - scope={'x': False})}), - b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" - b"\x00\x00return function(){ return x; }\x00\t\x00" - b"\x00\x00\x08x\x00\x00\x00\x00") - unicode_empty_scope = Code(u"function(){ return 'héllo';}", {}) - self.assertEqual(encode({'$field': unicode_empty_scope}), - b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" - b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" - b"\x00\x00\x00\x00\x00") + self.assertEqual( + encode({"test": "hello world"}), + b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" + b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" + b"\x64\x00\x00", + ) + self.assertEqual( + encode({"mike": 100}), + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"hello": 1.5}), + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00\x00\x00\x00\x00\xF8\x3F\x00", + ) + self.assertEqual( + encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" + ) + self.assertEqual( + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00\x00" + ) + self.assertEqual( + encode({"empty": []}), + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"none": {}}), + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 0)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 2)}), + b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" + b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 128)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", + ) + self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") + self.assertEqual( + encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C\xFF\x0F\x01\x00\x00\x00", + ) + self.assertEqual( + encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A\x62\x00\x69\x00\x00", + ) + self.assertEqual( + encode({"$where": Code("test")}), + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("function(){ return true;}", scope=None)}), + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00function(){ return true;}\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), + b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" + b"\x00\x00return function(){ return x; }\x00\t\x00" + b"\x00\x00\x08x\x00\x00\x00\x00", + ) + unicode_empty_scope = Code("function(){ return 'héllo';}", {}) + self.assertEqual( + encode({"$field": unicode_empty_scope}), + b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" + b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" + b"\x00\x00\x00\x00\x00", + ) a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B") - self.assertEqual(encode({"oid": a}), - b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" - b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00") - self.assertEqual(encode({"ref": DBRef("coll", a)}), - 
b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" - b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" - b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" - b"\x00") + self.assertEqual( + encode({"oid": a}), + b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" + b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00", + ) + self.assertEqual( + encode({"ref": DBRef("coll", a)}), + b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" + b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" + b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" + b"\x00", + ) def test_unknown_type(self): # Repr value differs with major python version - part = "type %r for fieldname 'foo'" % (b'\x14',) + part = "type {!r} for fieldname 'foo'".format(b"\x14") docs = [ - b'\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00', - (b'\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140' - b'\x00\x01\x00\x00\x00\x00\x00'), - (b' \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00' - b'\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00')] + b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", + (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140\x00\x01\x00\x00\x00\x00\x00"), + ( + b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00" + b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00" + ), + ] for bs in docs: try: decode(bs) @@ -479,29 +516,23 @@ def test_dbpointer(self): # not support creation of the DBPointer type, but will decode # DBPointer to DBRef. - bs = (b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" - b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") + bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" - self.assertEqual({'': DBRef('', ObjectId('5259b56afa5bd841d6585d99'))}, - decode(bs)) + self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs)) def test_bad_dbref(self): - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} + ref_only = {"ref": {"$ref": "collection"}} + id_only = {"ref": {"$id": ObjectId()}} - self.assertEqual(DBRef('collection', id=None), - decode(encode(ref_only))['ref']) + self.assertEqual(ref_only, decode(encode(ref_only))) self.assertEqual(id_only, decode(encode(id_only))) def test_bytes_as_keys(self): - doc = {b"foo": 'bar'} + doc = {b"foo": "bar"} # Since `bytes` are stored as Binary you can't use them - # as keys in python 3.x. Using binary data as a key makes - # no sense in BSON anyway and little sense in python. - if PY3: - self.assertRaises(InvalidDocument, encode, doc) - else: - self.assertTrue(encode(doc)) + # as keys. Using binary data as a key makes no sense in BSON + # anyway and little sense in python. 
+ self.assertRaises(InvalidDocument, encode, doc) def test_datetime_encode_decode(self): # Negative timestamps @@ -530,13 +561,12 @@ def test_large_datetime_truncation(self): self.assertEqual(dt2.second, dt1.second) def test_aware_datetime(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) - self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), - as_utc) - after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[ - "date"] + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) + offset = aware.utcoffset() + assert offset is not None + as_utc = (aware - offset).replace(tzinfo=utc) + self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc) + after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))["date"] self.assertEqual(utc, after.tzinfo) self.assertEqual(as_utc, after) @@ -545,55 +575,50 @@ def test_local_datetime(self): tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7) # It's not DST. - local = datetime.datetime(year=2025, month=12, hour=2, day=1, - tzinfo=tz) + local = datetime.datetime(year=2025, month=12, hour=2, day=1, tzinfo=tz) options = CodecOptions(tz_aware=True, tzinfo=tz) # Encode with this timezone, then decode to UTC. - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(hour=1, tzinfo=None), - decode(encoded)['date']) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual(local.replace(hour=1, tzinfo=None), decode(encoded)["date"]) # It's DST. - local = datetime.datetime(year=2025, month=4, hour=1, day=1, - tzinfo=tz) - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(month=3, day=31, hour=23, tzinfo=None), - decode(encoded)['date']) + local = datetime.datetime(year=2025, month=4, hour=1, day=1, tzinfo=tz) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual( + local.replace(month=3, day=31, hour=23, tzinfo=None), decode(encoded)["date"] + ) # Encode UTC, then decode in a different timezone. - encoded = encode({'date': local.replace(tzinfo=utc)}) - decoded = decode(encoded, options)['date'] + encoded = encode({"date": local.replace(tzinfo=utc)}) + decoded = decode(encoded, options)["date"] self.assertEqual(local.replace(hour=3), decoded) self.assertEqual(tz, decoded.tzinfo) # Test round-tripping. self.assertEqual( - local, decode(encode( - {'date': local}, codec_options=options), options)['date']) + local, decode(encode({"date": local}, codec_options=options), options)["date"] + ) # Test around the Unix Epoch. epochs = ( EPOCH_AWARE, - EPOCH_AWARE.astimezone(FixedOffset(120, 'one twenty')), - EPOCH_AWARE.astimezone(FixedOffset(-120, 'minus one twenty')) + EPOCH_AWARE.astimezone(FixedOffset(120, "one twenty")), + EPOCH_AWARE.astimezone(FixedOffset(-120, "minus one twenty")), ) utc_co = CodecOptions(tz_aware=True) for epoch in epochs: - doc = {'epoch': epoch} + doc = {"epoch": epoch} # We always retrieve datetimes in UTC unless told to do otherwise. - self.assertEqual( - EPOCH_AWARE, - decode(encode(doc), codec_options=utc_co)['epoch']) + self.assertEqual(EPOCH_AWARE, decode(encode(doc), codec_options=utc_co)["epoch"]) # Round-trip the epoch. 
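# --- Editor's aside (illustrative sketch, not part of the patch): the model
# behind the epoch assertions in this hunk. BSON stores datetimes as raw UTC
# milliseconds; tz_aware/tzinfo only control presentation on decode, so any
# fixed-offset rendering of the epoch round-trips to the same instant.
from bson import EPOCH_AWARE, decode, encode
from bson.codec_options import CodecOptions
from bson.tz_util import FixedOffset

shifted = EPOCH_AWARE.astimezone(FixedOffset(120, "plus_two"))
co = CodecOptions(tz_aware=True)
assert decode(encode({"epoch": shifted}), codec_options=co)["epoch"] == EPOCH_AWARE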
local_co = CodecOptions(tz_aware=True, tzinfo=epoch.tzinfo) - self.assertEqual( - epoch, - decode(encode(doc), codec_options=local_co)['epoch']) + self.assertEqual(epoch, decode(encode(doc), codec_options=local_co)["epoch"]) def test_naive_decode(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - naive_utc = (aware - aware.utcoffset()).replace(tzinfo=None) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) + offset = aware.utcoffset() + assert offset is not None + naive_utc = (aware - offset).replace(tzinfo=None) self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc) after = decode(encode({"date": aware}))["date"] self.assertEqual(None, after.tzinfo) @@ -603,41 +628,30 @@ def test_dst(self): d = {"x": datetime.datetime(1993, 4, 4, 2)} self.assertEqual(d, decode(encode(d))) + @unittest.skip("Disabled due to http://bugs.python.org/issue25222") def test_bad_encode(self): - if not PY3: - # Python3 treats this as a unicode string which won't raise - # an exception. If we passed the string as bytes instead we - # still wouldn't get an error since we store bytes as BSON - # binary subtype 0. - self.assertRaises(InvalidStringData, encode, - {"lalala": '\xf4\xe0\xf0\xe1\xc0 Color Touch'}) - # Work around what seems like a regression in python 3.5.0. - # See http://bugs.python.org/issue25222 - if sys.version_info[:2] < (3, 5): - evil_list = {'a': []} - evil_list['a'].append(evil_list) - evil_dict = {} - evil_dict['a'] = evil_dict - for evil_data in [evil_dict, evil_list]: - self.assertRaises(Exception, encode, evil_data) + evil_list: dict = {"a": []} + evil_list["a"].append(evil_list) + evil_dict: dict = {} + evil_dict["a"] = evil_dict + for evil_data in [evil_dict, evil_list]: + self.assertRaises(Exception, encode, evil_data) def test_overflow(self): - self.assertTrue(encode({"x": long(9223372036854775807)})) - self.assertRaises(OverflowError, encode, - {"x": long(9223372036854775808)}) + self.assertTrue(encode({"x": 9223372036854775807})) + self.assertRaises(OverflowError, encode, {"x": 9223372036854775808}) - self.assertTrue(encode({"x": long(-9223372036854775808)})) - self.assertRaises(OverflowError, encode, - {"x": long(-9223372036854775809)}) + self.assertTrue(encode({"x": -9223372036854775808})) + self.assertRaises(OverflowError, encode, {"x": -9223372036854775809}) def test_small_long_encode_decode(self): - encoded1 = encode({'x': 256}) - decoded1 = decode(encoded1)['x'] + encoded1 = encode({"x": 256}) + decoded1 = decode(encoded1)["x"] self.assertEqual(256, decoded1) - self.assertEqual(type(256), type(decoded1)) + self.assertEqual(int, type(decoded1)) - encoded2 = encode({'x': Int64(256)}) - decoded2 = decode(encoded2)['x'] + encoded2 = encode({"x": Int64(256)}) + decoded2 = decode(encoded2)["x"] expected = Int64(256) self.assertEqual(expected, decoded2) self.assertEqual(type(expected), type(decoded2)) @@ -645,99 +659,81 @@ def test_small_long_encode_decode(self): self.assertNotEqual(type(decoded1), type(decoded2)) def test_tuple(self): - self.assertEqual({"tuple": [1, 2]}, - decode(encode({"tuple": (1, 2)}))) + self.assertEqual({"tuple": [1, 2]}, decode(encode({"tuple": (1, 2)}))) def test_uuid(self): - id = uuid.uuid4() - transformed_id = decode(encode({"id": id}))["id"] + # The default uuid_representation is UNSPECIFIED + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(encode({"uuid": id})) + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + 
transformed_id = decode(encode({"id": id}, codec_options=opts), codec_options=opts)["id"] self.assertTrue(isinstance(transformed_id, uuid.UUID)) self.assertEqual(id, transformed_id) self.assertNotEqual(uuid.uuid4(), transformed_id) def test_uuid_legacy(self): id = uuid.uuid4() - legacy = UUIDLegacy(id) + legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(3, legacy.subtype) - transformed = decode(encode({"uuid": legacy}))["uuid"] - self.assertTrue(isinstance(transformed, uuid.UUID)) + bin = decode(encode({"uuid": legacy}))["uuid"] + self.assertTrue(isinstance(bin, Binary)) + transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY) self.assertEqual(id, transformed) - self.assertNotEqual(UUIDLegacy(uuid.uuid4()), UUIDLegacy(transformed)) # The C extension was segfaulting on unicode RegExs, so we have this test # that doesn't really test anything but the lack of a segfault. def test_unicode_regex(self): - regex = re.compile(u'revisi\xf3n') + regex = re.compile("revisi\xf3n") decode(encode({"regex": regex})) def test_non_string_keys(self): self.assertRaises(InvalidDocument, encode, {8.9: "test"}) def test_utf8(self): - w = {u"aéあ": u"aéあ"} + w = {"aéあ": "aéあ"} self.assertEqual(w, decode(encode(w))) - # b'a\xe9' == u"aé".encode("iso-8859-1") - iso8859_bytes = b'a\xe9' + # b'a\xe9' == "aé".encode("iso-8859-1") + iso8859_bytes = b"a\xe9" y = {"hello": iso8859_bytes} - if PY3: - # Stored as BSON binary subtype 0. - out = decode(encode(y)) - self.assertTrue(isinstance(out['hello'], bytes)) - self.assertEqual(out['hello'], iso8859_bytes) - else: - # Python 2. - try: - encode(y) - except InvalidStringData as e: - self.assertTrue(repr(iso8859_bytes) in str(e)) - - # The next two tests only make sense in python 2.x since - # you can't use `bytes` type as document keys in python 3.x. - x = {u"aéあ".encode("utf-8"): u"aéあ".encode("utf-8")} - self.assertEqual(w, decode(encode(x))) - - z = {iso8859_bytes: "hello"} - self.assertRaises(InvalidStringData, encode, z) + # Stored as BSON binary subtype 0. + out = decode(encode(y)) + self.assertTrue(isinstance(out["hello"], bytes)) + self.assertEqual(out["hello"], iso8859_bytes) def test_null_character(self): doc = {"a": "\x00"} self.assertEqual(doc, decode(encode(doc))) - # This test doesn't make much sense in Python2 - # since {'a': '\x00'} == {'a': u'\x00'}.
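Aside: the UUID changes in `test_uuid`/`test_uuid_legacy` follow from PyMongo 4 defaulting to `UuidRepresentation.UNSPECIFIED`, under which native `uuid.UUID` values no longer encode implicitly. A rough sketch of the two supported paths (collection-free, `bson`-only):

```python
import uuid

from bson import decode, encode
from bson.binary import Binary, UuidRepresentation
from bson.codec_options import CodecOptions

u = uuid.uuid4()

# Path 1: opt in to a representation via CodecOptions.
opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
back = decode(encode({"u": u}, codec_options=opts), codec_options=opts)["u"]
assert back == u

# Path 2: convert explicitly; without options, decode yields Binary.
raw = decode(encode({"u": Binary.from_uuid(u)}))["u"]
assert isinstance(raw, Binary) and raw.as_uuid() == u
```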
- # Decoding here actually returns {'a': '\x00'} - doc = {"a": u"\x00"} + doc = {"a": "\x00"} self.assertEqual(doc, decode(encode(doc))) self.assertRaises(InvalidDocument, encode, {b"\x00": "a"}) - self.assertRaises(InvalidDocument, encode, {u"\x00": "a"}) + self.assertRaises(InvalidDocument, encode, {"\x00": "a"}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile(b"ab\x00c")}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile(u"ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile(b"ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile("ab\x00c")}) def test_move_id(self): - self.assertEqual(b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" - b"\x02a\x00\x02\x00\x00\x00a\x00\x00", - encode(SON([("a", "a"), ("_id", "a")]))) - - self.assertEqual(b"\x2c\x00\x00\x00" - b"\x02_id\x00\x02\x00\x00\x00b\x00" - b"\x03b\x00" - b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" - b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", - encode(SON([("b", - SON([("a", "a"), ("_id", "a")])), - ("_id", "b")]))) + self.assertEqual( + b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" + b"\x02a\x00\x02\x00\x00\x00a\x00\x00", + encode(SON([("a", "a"), ("_id", "a")])), + ) + + self.assertEqual( + b"\x2c\x00\x00\x00" + b"\x02_id\x00\x02\x00\x00\x00b\x00" + b"\x03b\x00" + b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" + b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", + encode(SON([("b", SON([("a", "a"), ("_id", "a")])), ("_id", "b")])), + ) def test_dates(self): - doc = {"early": datetime.datetime(1686, 5, 5), - "late": datetime.datetime(2086, 5, 5)} + doc = {"early": datetime.datetime(1686, 5, 5), "late": datetime.datetime(2086, 5, 5)} try: self.assertEqual(doc, decode(encode(doc))) except ValueError: @@ -750,15 +746,12 @@ def test_dates(self): def test_custom_class(self): self.assertIsInstance(decode(encode({})), dict) self.assertNotIsInstance(decode(encode({})), SON) - self.assertIsInstance( - decode(encode({}), CodecOptions(document_class=SON)), SON) + self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) # type: ignore[type-var] - self.assertEqual( - 1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) + self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) # type: ignore[type-var] x = encode({"x": [{"y": 1}]}) - self.assertIsInstance( - decode(x, CodecOptions(document_class=SON))["x"][0], SON) + self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) # type: ignore[type-var] def test_subclasses(self): # make sure we can serialize subclasses of native Python types. 
@@ -768,84 +761,69 @@ class _myint(int): class _myfloat(float): pass - class _myunicode(text_type): + class _myunicode(str): pass - d = {'a': _myint(42), 'b': _myfloat(63.9), - 'c': _myunicode('hello world') - } + d = {"a": _myint(42), "b": _myfloat(63.9), "c": _myunicode("hello world")} d2 = decode(encode(d)) - for key, value in iteritems(d2): + for key, value in d2.items(): orig_value = d[key] orig_type = orig_value.__class__.__bases__[0] self.assertEqual(type(value), orig_type) self.assertEqual(value, orig_type(value)) def test_ordered_dict(self): - try: - from collections import OrderedDict - except ImportError: - raise SkipTest("No OrderedDict") d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual( - d, decode(encode(d), CodecOptions(document_class=OrderedDict))) + self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) # type: ignore[type-var] def test_bson_regex(self): # Invalid Python regex, though valid PCRE. - bson_re1 = Regex(r'[\w-\.]') - self.assertEqual(r'[\w-\.]', bson_re1.pattern) + bson_re1 = Regex(r"[\w-\.]") + self.assertEqual(r"[\w-\.]", bson_re1.pattern) self.assertEqual(0, bson_re1.flags) - doc1 = {'r': bson_re1} - doc1_bson = ( - b'\x11\x00\x00\x00' # document length - b'\x0br\x00[\\w-\\.]\x00\x00' # r: regex - b'\x00') # document terminator + doc1 = {"r": bson_re1} + doc1_bson = b"\x11\x00\x00\x00\x0br\x00[\\w-\\.]\x00\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc1_bson, encode(doc1)) self.assertEqual(doc1, decode(doc1_bson)) # Valid Python regex, with flags. - re2 = re.compile(u'.*', re.I | re.M | re.S | re.U | re.X) - bson_re2 = Regex(u'.*', re.I | re.M | re.S | re.U | re.X) + re2 = re.compile(".*", re.I | re.M | re.S | re.U | re.X) + bson_re2 = Regex(".*", re.I | re.M | re.S | re.U | re.X) - doc2_with_re = {'r': re2} - doc2_with_bson_re = {'r': bson_re2} - doc2_bson = ( - b"\x11\x00\x00\x00" # document length - b"\x0br\x00.*\x00imsux\x00" # r: regex - b"\x00") # document terminator + doc2_with_re = {"r": re2} + doc2_with_bson_re = {"r": bson_re2} + doc2_bson = b"\x11\x00\x00\x00\x0br\x00.*\x00imsux\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc2_bson, encode(doc2_with_re)) self.assertEqual(doc2_bson, encode(doc2_with_bson_re)) - self.assertEqual(re2.pattern, decode(doc2_bson)['r'].pattern) - self.assertEqual(re2.flags, decode(doc2_bson)['r'].flags) + self.assertEqual(re2.pattern, decode(doc2_bson)["r"].pattern) + self.assertEqual(re2.flags, decode(doc2_bson)["r"].flags) def test_regex_from_native(self): - self.assertEqual('.*', Regex.from_native(re.compile('.*')).pattern) - self.assertEqual(0, Regex.from_native(re.compile(b'')).flags) + self.assertEqual(".*", Regex.from_native(re.compile(".*")).pattern) + self.assertEqual(0, Regex.from_native(re.compile(b"")).flags) - regex = re.compile(b'', re.I | re.L | re.M | re.S | re.X) - self.assertEqual( - re.I | re.L | re.M | re.S | re.X, - Regex.from_native(regex).flags) + regex = re.compile(b"", re.I | re.L | re.M | re.S | re.X) + self.assertEqual(re.I | re.L | re.M | re.S | re.X, Regex.from_native(regex).flags) - unicode_regex = re.compile('', re.U) + unicode_regex = re.compile("", re.U) self.assertEqual(re.U, Regex.from_native(unicode_regex).flags) def test_regex_hash(self): - self.assertRaises(TypeError, hash, Regex('hello')) + self.assertRaises(TypeError, hash, Regex("hello")) def test_regex_comparison(self): - re1 = Regex('a') - re2 = Regex('b') + re1 = Regex("a") + re2 = Regex("b") 
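Aside: `bson.Regex` exists because BSON regular expressions are PCRE, not Python `re`, which is exactly what `test_bson_regex` above exercises. An illustrative sketch:

```python
import re

from bson.regex import Regex

# Valid PCRE but rejected by Python's re module, so it must stay a Regex.
pcre_only = Regex(r"[\w-\.]")
assert pcre_only.pattern == r"[\w-\.]"

# Python patterns round-trip through Regex.from_native/try_compile.
wrapped = Regex.from_native(re.compile(".*", re.IGNORECASE))
assert wrapped.flags & re.IGNORECASE
assert wrapped.try_compile().pattern == ".*"
```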
self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.M) + re1 = Regex("a", re.I) + re2 = Regex("a", re.M) self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.I) + re1 = Regex("a", re.I) + re2 = Regex("a", re.I) self.assertEqual(re1, re2) def test_exception_wrapping(self): @@ -853,13 +831,12 @@ def test_exception_wrapping(self): # the final exception always matches InvalidBSON. # {'s': '\xff'}, will throw attempting to decode utf-8. - bad_doc = b'\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00' + bad_doc = b"\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00" with self.assertRaises(InvalidBSON) as context: decode_all(bad_doc) - self.assertIn("codec can't decode byte 0xff", - str(context.exception)) + self.assertIn("codec can't decode byte 0xff", str(context.exception)) def test_minkey_maxkey_comparison(self): # MinKey's <, <=, >, >=, !=, and ==. @@ -933,29 +910,25 @@ def test_timestamp_comparison(self): self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0)) def test_timestamp_highorder_bits(self): - doc = {'a': Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} - doc_bson = (b'\x10\x00\x00\x00' - b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff' - b'\x00') + doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" self.assertEqual(doc_bson, encode(doc)) self.assertEqual(doc, decode(doc_bson)) def test_bad_id_keys(self): - self.assertRaises(InvalidDocument, encode, - {"_id": {"$bad": 123}}, True) - self.assertRaises(InvalidDocument, encode, - {"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}, True) - encode({"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}) + self.assertRaises(InvalidDocument, encode, {"_id": {"$bad": 123}}, True) + self.assertRaises( + InvalidDocument, encode, {"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}, True + ) + encode({"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}) def test_bson_encode_thread_safe(self): - def target(i): for j in range(1000): - my_int = type('MyInt_%s_%s' % (i, j), (int,), {}) - bson.encode({'my_int': my_int()}) + my_int = type(f"MyInt_{i}_{j}", (int,), {}) + bson.encode({"my_int": my_int()}) - threads = [ExceptionCatchingThread(target=target, args=(i,)) - for i in range(3)] + threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] for t in threads: t.start() @@ -966,24 +939,24 @@ def target(i): self.assertIsNone(t.exc) def test_raise_invalid_document(self): - class Wrapper(object): + class Wrapper: def __init__(self, val): self.val = val def __repr__(self): return repr(self.val) - self.assertEqual('1', repr(Wrapper(1))) + self.assertEqual("1", repr(Wrapper(1))) with self.assertRaisesRegex( - InvalidDocument, - "cannot encode object: 1, of type: " + repr(Wrapper)): - encode({'t': Wrapper(1)}) + InvalidDocument, "cannot encode object: 1, of type: " + repr(Wrapper) + ): + encode({"t": Wrapper(1)}) class TestCodecOptions(unittest.TestCase): def test_document_class(self): self.assertRaises(TypeError, CodecOptions, document_class=object) - self.assertIs(SON, CodecOptions(document_class=SON).document_class) + self.assertIs(SON, CodecOptions(document_class=SON).document_class) # type: ignore[type-var] def test_tz_aware(self): self.assertRaises(TypeError, CodecOptions, tz_aware=1) @@ -991,100 +964,366 @@ def test_tz_aware(self): self.assertTrue(CodecOptions(tz_aware=True).tz_aware) def test_uuid_representation(self): - self.assertRaises(ValueError, CodecOptions, uuid_representation=None) self.assertRaises(ValueError, CodecOptions, 
uuid_representation=7) self.assertRaises(ValueError, CodecOptions, uuid_representation=2) def test_tzinfo(self): - self.assertRaises(TypeError, CodecOptions, tzinfo='pacific') - tz = FixedOffset(42, 'forty-two') + self.assertRaises(TypeError, CodecOptions, tzinfo="pacific") + tz = FixedOffset(42, "forty-two") self.assertRaises(ValueError, CodecOptions, tzinfo=tz) self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo) def test_codec_options_repr(self): - r = ("CodecOptions(document_class=dict, tz_aware=False, " - "uuid_representation=PYTHON_LEGACY, " - "unicode_decode_error_handler='strict', " - "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None))") + r = ( + "CodecOptions(document_class=dict, tz_aware=False, " + "uuid_representation=UuidRepresentation.UNSPECIFIED, " + "unicode_decode_error_handler='strict', " + "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " + "fallback_encoder=None), " + "datetime_conversion=DatetimeConversion.DATETIME)" + ) self.assertEqual(r, repr(CodecOptions())) def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is - # False. The default uuid_representation is PYTHON_LEGACY but this - # decodes same as STANDARD, so all this test proves about UUID decoding - # is that it's not CSHARP_LEGACY or JAVA_LEGACY. - doc = {'sub_document': {}, - 'uuid': uuid.uuid4(), - 'dt': datetime.datetime.utcnow()} + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} decoded = bson.decode_all(bson.encode(doc))[0] - self.assertIsInstance(decoded['sub_document'], dict) - self.assertEqual(decoded['uuid'], doc['uuid']) - self.assertIsNone(decoded['dt'].tzinfo) + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + # The default uuid_representation is UNSPECIFIED + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) + + def test_decode_all_no_options(self): + # Test decode_all()'s default document_class is dict and tz_aware is + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} + + decoded = bson.decode_all(bson.encode(doc), None)[0] + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + + doc2 = {"id": Binary.from_uuid(uuid.uuid4())} + decoded = bson.decode_all(bson.encode(doc2), None)[0] + self.assertIsInstance(decoded["id"], Binary) + + def test_decode_all_kwarg(self): + doc = {"a": uuid.uuid4()} + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encoded = encode(doc, codec_options=opts) + # Positional codec_options + self.assertEqual([doc], decode_all(encoded, opts)) + # Keyword codec_options + self.assertEqual([doc], decode_all(encoded, codec_options=opts)) def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) - # Test handling of bad key value. - invalid_key = enc[:7] + b'\xe9' + enc[8:] - replaced_key = b'ke\xe9str'.decode('utf-8', 'replace') - ignored_key = b'ke\xe9str'.decode('utf-8', 'ignore') + # Test handling of bad key value, bad string value, and both. + invalid_key = enc[:7] + b"\xe9" + enc[8:] + invalid_val = enc[:18] + b"\xe9" + enc[19:] + invalid_both = enc[:7] + b"\xe9" + enc[8:18] + b"\xe9" + enc[19:] + + # Ensure that strict mode raises an error. 
+ for invalid in [invalid_key, invalid_val, invalid_both]: + self.assertRaises( + InvalidBSON, + decode, + invalid, + CodecOptions(unicode_decode_error_handler="strict"), + ) + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) + + # Test all other error handlers. + for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: + expected_key = b"ke\xe9str".decode("utf-8", handler) + expected_val = b"fo\xe9bar".decode("utf-8", handler) + doc = decode(invalid_key, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: "foobar"}) + doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {"keystr": expected_val}) + doc = decode(invalid_both, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: expected_val}) - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: u"foobar"}) + # Test handling bad error mode. + dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) + self.assertEqual(dec, {"keystr": "foobar"}) - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: u"foobar"}) + self.assertRaises( + InvalidBSON, + decode, + invalid_both, + CodecOptions(unicode_decode_error_handler="junk"), + ) - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_key) + def round_trip_pickle(self, obj, pickled_with_older): + pickled_with_older_obj = pickle.loads(pickled_with_older) + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + pkl = pickle.dumps(obj, protocol=protocol) + obj2 = pickle.loads(pkl) + self.assertEqual(obj, obj2) + self.assertEqual(pickled_with_older_obj, obj2) + + def test_regex_pickling(self): + reg = Regex(".?") + pickled_with_3 = ( + b"\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag" + b"s\x94K\x00ub." + ) + self.round_trip_pickle(reg, pickled_with_3) + + def test_timestamp_pickling(self): + ts = Timestamp(0, 1) + pickled_with_3 = ( + b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)" + b"\x81\x94}\x94(" + b"\x8c\x10_Timestamp__time\x94K\x00\x8c" + b"\x0f_Timestamp__inc\x94K\x01ub." + ) + self.round_trip_pickle(ts, pickled_with_3) + + def test_dbref_pickling(self): + dbr = DBRef("foo", 5) + pickled_with_3 = ( + b"\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub." + ) + self.round_trip_pickle(dbr, pickled_with_3) + + dbr = DBRef("foo", 5, database="db", kwargs1=None) + pickled_with_3 = ( + b"\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94" + b"\x8c\x07kwargs1\x94Nsub." + ) - # Test handing of bad string value. 
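Aside: the expanded handler matrix above works because the decoder delegates to Python's codec error handlers, so any handler accepted by `bytes.decode` behaves identically here. A minimal sketch using the same corrupted offsets as the test:

```python
from bson import decode, encode
from bson.codec_options import CodecOptions

enc = encode({"keystr": "foobar"})
invalid_val = enc[:18] + b"\xe9" + enc[19:]  # corrupt one byte of the string value

doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler="replace"))
assert doc == {"keystr": b"fo\xe9bar".decode("utf-8", "replace")}
```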
- invalid_val = BSON(enc[:18] + b'\xe9' + enc[19:]) - replaced_val = b'fo\xe9bar'.decode('utf-8', 'replace') - ignored_val = b'fo\xe9bar'.decode('utf-8', 'ignore') + self.round_trip_pickle(dbr, pickled_with_3) - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {u"keystr": replaced_val}) + def test_minkey_pickling(self): + mink = MinKey() + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)" + b"\x81\x94." + ) - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {u"keystr": ignored_val}) + self.round_trip_pickle(mink, pickled_with_3) - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_val) + def test_maxkey_pickling(self): + maxk = MaxKey() + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)" + b"\x81\x94." + ) - # Test handing bad key + bad value. - invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:] + self.round_trip_pickle(maxk, pickled_with_3) - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: replaced_val}) + def test_int64_pickling(self): + i64 = Int64(9) + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94" + b"\x81\x94." + ) + self.round_trip_pickle(i64, pickled_with_3) + + def test_bson_encode_decode(self) -> None: + doc = {"_id": ObjectId()} + encoded = bson.encode(doc) + decoded = bson.decode(encoded) + encoded = bson.encode(decoded) + decoded = bson.decode(encoded) + # Documents returned from decode are mutable. + decoded["new_field"] = 1 + self.assertTrue(decoded["_id"].generation_time) + + +class TestDatetimeConversion(unittest.TestCase): + def test_comps(self): + # Tests other timestamp formats. + # Test each of the rich comparison methods. + pairs = [ + (DatetimeMS(-1), DatetimeMS(1)), + (DatetimeMS(0), DatetimeMS(0)), + (DatetimeMS(1), DatetimeMS(-1)), + ] - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: ignored_val}) + comp_ops = ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__"] + for lh, rh in pairs: + for op in comp_ops: + self.assertEqual(getattr(lh, op)(rh), getattr(lh._value, op)(rh._value)) + + def test_class_conversions(self): + # Test class conversions. + dtr1 = DatetimeMS(1234) + dt1 = dtr1.as_datetime() + self.assertEqual(dtr1, DatetimeMS(dt1)) + + dt2 = datetime.datetime(1969, 1, 1) + dtr2 = DatetimeMS(dt2) + self.assertEqual(dtr2.as_datetime(), dt2) + + # Test encode and decode without codec options. Expect: DatetimeMS => datetime + dtr1 = DatetimeMS(0) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1) + self.assertEqual(dec1["x"], datetime.datetime(1970, 1, 1)) + self.assertNotEqual(type(dtr1), type(dec1["x"])) + + # Test encode and decode with codec options. 
Expect: DatetimeMS => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1, opts1) + self.assertEqual(type(dtr1), type(dec1["x"])) + self.assertEqual(dtr1, dec1["x"]) + + # Expect: datetime => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) + dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + enc1 = encode({"x": dt1}) + dec1 = decode(enc1, opts1) + self.assertEqual(dec1["x"], DatetimeMS(0)) + self.assertNotEqual(type(dt1), type(dec1["x"])) + + def test_clamping(self): + # Test clamping from below and above. + opts1 = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) - above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) - def test_tz_clamping(self): + # Naive clamping to local tz. + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + + dec_below = decode(below, opts1) + self.assertEqual(dec_below["x"], datetime.datetime.min) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(microsecond=999000), + ) + + # Aware clamping. + opts2 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_datetime_auto(self): + # Naive auto, in range. + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Naive auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) - # Naive auto, above range. 
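Aside: the `DatetimeConversion` behaviors asserted above condense to a few lines. A sketch (assumes PyMongo >= 4.3, where `DatetimeMS` lives in `bson.datetime_ms`):

```python
import datetime

from bson import decode, encode
from bson.codec_options import CodecOptions, DatetimeConversion
from bson.datetime_ms import DatetimeMS

# Far below datetime.datetime.min, yet a valid int64 millisecond count.
raw = encode({"x": DatetimeMS(-(2**62))})

clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP)
assert decode(raw, clamp)["x"] == datetime.datetime.min  # pinned into range

auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO)
assert decode(raw, auto)["x"] == DatetimeMS(-(2**62))  # left as DatetimeMS
```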
+ above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + # Aware auto, in range. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_AUTO, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts2) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Aware auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Aware auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + def test_millis_from_datetime_ms(self): + # Test 65+ bit integer conversion, expect OverflowError. + big_ms = 2**65 + with self.assertRaises(OverflowError): + encode({"x": DatetimeMS(big_ms)}) + + # Subclass of DatetimeMS w/ __int__ override, expect an Error. + class DatetimeMSOverride(DatetimeMS): + def __int__(self): + return float(self._value) + + float_ms = DatetimeMSOverride(2) + with self.assertRaises(TypeError): + encode({"x": float_ms}) + + # Test InvalidBSON errors on conversion include _DATETIME_ERROR_SUGGESTION + small_ms = -2 << 51 + with self.assertRaisesRegex(InvalidBSON, re.compile(re.escape(_DATETIME_ERROR_SUGGESTION))): + decode(encode({"a": DatetimeMS(small_ms)})) + + +class TestLongLongToString(unittest.TestCase): + def test_long_long_to_string(self): + try: + from bson import _cbson + + _cbson._test_long_long_to_str() + except ImportError: + print("_cbson was not imported. Check compilation logs.") if __name__ == "__main__": diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 0c461cf404..96ef458ec5 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -13,6 +13,7 @@ # limitations under the License. """Run the BSON corpus specification tests.""" +from __future__ import annotations import binascii import codecs @@ -21,74 +22,78 @@ import json import os import sys - from decimal import DecimalException sys.path[0:0] = [""] +from test import unittest + from bson import decode, encode, json_util from bson.binary import STANDARD from bson.codec_options import CodecOptions -from bson.decimal128 import Decimal128 from bson.dbref import DBRef -from bson.errors import InvalidBSON, InvalidId +from bson.decimal128 import Decimal128 +from bson.errors import InvalidBSON, InvalidDocument, InvalidId from bson.json_util import JSONMode -from bson.py3compat import text_type, b from bson.son import SON -from test import unittest - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'bson_corpus') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bson_corpus") -_TESTS_TO_SKIP = set([ +_TESTS_TO_SKIP = { # Python cannot decode dates after year 9999. - 'Y10K', -]) + "Y10K", +} -_NON_PARSE_ERRORS = set([ +_NON_PARSE_ERRORS = { # {"$date": } is our legacy format which we still need to parse. 
- 'Bad $date (number, not string or hash)', + "Bad $date (number, not string or hash)", # This variant of $numberLong may have been generated by an old version # of mongoexport. - 'Bad $numberLong (number, not string)', -]) + "Bad $numberLong (number, not string)", + # Python's UUID constructor is very permissive. + "$uuid invalid value--misplaced hyphens", + # We parse Regex flags with extra characters, including nulls. + "Null byte in $regularExpression options", +} + +_IMPLICIT_LOSSY_TESTS = { + # JSON decodes top-level $ref+$id as a DBRef but BSON doesn't. + "Document with key names similar to those of a DBRef" +} _DEPRECATED_BSON_TYPES = { # Symbol - '0x0E': text_type, + "0x0E": str, # Undefined - '0x06': type(None), + "0x06": type(None), # DBPointer - '0x0C': DBRef + "0x0C": DBRef, } # Need to set tz_aware=True in order to use "strict" dates in extended JSON. -codec_options = CodecOptions(tz_aware=True, document_class=SON) +codec_options: CodecOptions = CodecOptions(tz_aware=True, document_class=SON) +codec_options_no_tzaware: CodecOptions = CodecOptions(document_class=SON) # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) -json_options_uuid_04 = json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - uuid_representation=STANDARD) +json_options_uuid_04 = json_util.JSONOptions( + json_mode=JSONMode.CANONICAL, uuid_representation=STANDARD +) json_options_iso8601 = json_util.JSONOptions( - datetime_representation=json_util.DatetimeRepresentation.ISO8601) -to_extjson = functools.partial(json_util.dumps, - json_options=json_util.CANONICAL_JSON_OPTIONS) -to_extjson_uuid_04 = functools.partial(json_util.dumps, - json_options=json_options_uuid_04) -to_extjson_iso8601 = functools.partial(json_util.dumps, - json_options=json_options_iso8601) -to_relaxed_extjson = functools.partial( - json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) -to_bson_uuid_04 = functools.partial(encode, - codec_options=codec_options_uuid_04) + datetime_representation=json_util.DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY +) +to_extjson = functools.partial(json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS) +to_extjson_uuid_04 = functools.partial(json_util.dumps, json_options=json_options_uuid_04) +to_extjson_iso8601 = functools.partial(json_util.dumps, json_options=json_options_iso8601) +to_relaxed_extjson = functools.partial(json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) +to_bson_uuid_04 = functools.partial(encode, codec_options=codec_options_uuid_04) to_bson = functools.partial(encode, codec_options=codec_options) -decode_bson = lambda bbytes: decode(bbytes, codec_options=codec_options) +decode_bson = functools.partial(decode, codec_options=codec_options_no_tzaware) decode_extjson = functools.partial( json_util.loads, - json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - document_class=SON)) + json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, document_class=SON), +) loads = functools.partial(json.loads, object_pairs_hook=SON) @@ -103,60 +108,64 @@ def assertJsonEqual(self, first, second, msg=None): def create_test(case_spec): - bson_type = case_spec['bson_type'] + bson_type = case_spec["bson_type"] # Test key is absent when testing top-level documents. 
- test_key = case_spec.get('test_key') - deprecated = case_spec.get('deprecated') + test_key = case_spec.get("test_key") + deprecated = case_spec.get("deprecated") def run_test(self): - for valid_case in case_spec.get('valid', []): - description = valid_case['description'] + for valid_case in case_spec.get("valid", []): + description = valid_case["description"] if description in _TESTS_TO_SKIP: continue # Special case for testing encoding UUID as binary subtype 0x04. - if description == 'subtype 0x04': + if description.startswith("subtype 0x04"): encode_extjson = to_extjson_uuid_04 encode_bson = to_bson_uuid_04 else: encode_extjson = to_extjson encode_bson = to_bson - cB = binascii.unhexlify(b(valid_case['canonical_bson'])) - cEJ = valid_case['canonical_extjson'] - rEJ = valid_case.get('relaxed_extjson') - dEJ = valid_case.get('degenerate_extjson') - lossy = valid_case.get('lossy') + cB = binascii.unhexlify(valid_case["canonical_bson"].encode("utf8")) + cEJ = valid_case["canonical_extjson"] + rEJ = valid_case.get("relaxed_extjson") + dEJ = valid_case.get("degenerate_extjson") + if description in _IMPLICIT_LOSSY_TESTS: + valid_case.setdefault("lossy", True) + lossy = valid_case.get("lossy") + + # BSON double, use lowercase 'e+' to match Python's encoding + if bson_type == "0x01": + cEJ = cEJ.replace("E+", "e+") decoded_bson = decode_bson(cB) if not lossy: # Make sure we can parse the legacy (default) JSON format. legacy_json = json_util.dumps( - decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS) - self.assertEqual(decode_extjson(legacy_json), decoded_bson) + decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS + ) + self.assertEqual(decode_extjson(legacy_json), decoded_bson, description) if deprecated: - if 'converted_bson' in valid_case: - converted_bson = binascii.unhexlify( - b(valid_case['converted_bson'])) + if "converted_bson" in valid_case: + converted_bson = binascii.unhexlify(valid_case["converted_bson"].encode("utf8")) self.assertEqual(encode_bson(decoded_bson), converted_bson) self.assertJsonEqual( - encode_extjson(decode_bson(converted_bson)), - valid_case['converted_extjson']) + encode_extjson(decode_bson(converted_bson)), valid_case["converted_extjson"] + ) # Make sure we can decode the type. self.assertEqual(decoded_bson, decode_extjson(cEJ)) if test_key is not None: - self.assertIsInstance(decoded_bson[test_key], - _DEPRECATED_BSON_TYPES[bson_type]) + self.assertIsInstance(decoded_bson[test_key], _DEPRECATED_BSON_TYPES[bson_type]) continue # Jython can't handle NaN with a payload from # struct.(un)pack if endianness is specified in the format string. - if not (sys.platform.startswith("java") and - description == 'NaN with payload'): + if not (sys.platform.startswith("java") and description == "NaN with payload"): # Test round-tripping canonical bson. - self.assertEqual(encode_bson(decoded_bson), cB) + self.assertEqual(encode_bson(decoded_bson), cB, description) self.assertJsonEqual(encode_extjson(decoded_bson), cEJ) # Test round-tripping canonical extended json. @@ -166,8 +175,8 @@ def run_test(self): self.assertEqual(encode_bson(decoded_json), cB) # Test round-tripping degenerate bson. - if 'degenerate_bson' in valid_case: - dB = binascii.unhexlify(b(valid_case['degenerate_bson'])) + if "degenerate_bson" in valid_case: + dB = binascii.unhexlify(valid_case["degenerate_bson"].encode("utf8")) self.assertEqual(encode_bson(decode_bson(dB)), cB) # Test round-tripping degenerate extended json. 
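Aside: the heart of the corpus runner above is a canonical round-trip property; stripped of the harness it looks like this sketch:

```python
from bson import decode, encode, json_util

native = decode(encode({"n": 1}))  # BSON round-trip

# Canonical extended JSON round-trip, as the corpus's cEJ checks do.
cEJ = json_util.dumps(native, json_options=json_util.CANONICAL_JSON_OPTIONS)
assert json_util.loads(cEJ) == native  # {"$numberInt": "1"} parses back to 1
```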
@@ -183,42 +192,48 @@ def run_test(self): decoded_json = decode_extjson(rEJ) self.assertJsonEqual(to_relaxed_extjson(decoded_json), rEJ) - for decode_error_case in case_spec.get('decodeErrors', []): + for decode_error_case in case_spec.get("decodeErrors", []): with self.assertRaises(InvalidBSON): - decode_bson( - binascii.unhexlify(b(decode_error_case['bson']))) - - for parse_error_case in case_spec.get('parseErrors', []): - if bson_type == '0x13': - self.assertRaises( - DecimalException, Decimal128, parse_error_case['string']) - elif bson_type == '0x00': - description = parse_error_case['description'] - if description in _NON_PARSE_ERRORS: - decode_extjson(parse_error_case['string']) - else: - try: - decode_extjson(parse_error_case['string']) - raise AssertionError('exception not raised for test ' - 'case: ' + description) - except (ValueError, KeyError, TypeError, InvalidId): - pass + decode_bson(binascii.unhexlify(decode_error_case["bson"].encode("utf8"))) + + for parse_error_case in case_spec.get("parseErrors", []): + description = parse_error_case["description"] + if description in _NON_PARSE_ERRORS: + decode_extjson(parse_error_case["string"]) + continue + if bson_type == "0x13": + self.assertRaises(DecimalException, Decimal128, parse_error_case["string"]) + elif bson_type == "0x00": + try: + doc = decode_extjson(parse_error_case["string"]) + # Null bytes are validated when encoding to BSON. + if "Null" in description: + to_bson(doc) + raise AssertionError("exception not raised for test case: " + description) + except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): + pass + elif bson_type == "0x05": + try: + decode_extjson(parse_error_case["string"]) + raise AssertionError("exception not raised for test case: " + description) + except (TypeError, ValueError): + pass else: - raise AssertionError('cannot test parseErrors for type ' + - bson_type) + raise AssertionError("cannot test parseErrors for type " + bson_type) + return run_test def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) - with codecs.open(filename, encoding='utf-8') as bson_test_file: + with codecs.open(filename, encoding="utf-8") as bson_test_file: test_method = create_test(json.load(bson_test_file)) - setattr(TestBSONCorpus, 'test_' + test_suffix, test_method) + setattr(TestBSONCorpus, "test_" + test_suffix, test_method) create_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_bulk.py b/test/test_bulk.py index ec148a4ac5..6619d33f47 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -13,125 +13,140 @@ # limitations under the License. 
"""Test the bulk API.""" +from __future__ import annotations import sys +import uuid + +from pymongo.mongo_client import MongoClient sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + remove_all_users, + rs_or_single_client_noauth, + single_client, + wait_until, +) + +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions from bson.objectid import ObjectId +from pymongo.collection import Collection +from pymongo.common import partition_node +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) from pymongo.operations import * -from pymongo.errors import (ConfigurationError, - InvalidOperation, - OperationFailure) from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (remove_all_users, - rs_or_single_client_noauth) class BulkTestBase(IntegrationTest): + coll: Collection + coll_w0: Collection @classmethod def setUpClass(cls): - super(BulkTestBase, cls).setUpClass() + super().setUpClass() cls.coll = cls.db.test - ismaster = client_context.client.admin.command('ismaster') - cls.has_write_commands = (ismaster.get("maxWireVersion", 0) > 1) + cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) def setUp(self): - super(BulkTestBase, self).setUp() + super().setUp() self.coll.drop() def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" for key, value in expected.items(): - if key == 'nModified': - if self.has_write_commands: - self.assertEqual(value, actual['nModified']) - else: - # Legacy servers don't include nModified in the response. - self.assertFalse('nModified' in actual) - elif key == 'upserted': + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": expected_upserts = value - actual_upserts = actual['upserted'] + actual_upserts = actual["upserted"] self.assertEqual( - len(expected_upserts), len(actual_upserts), - 'Expected %d elements in "upserted", got %d' % ( - len(expected_upserts), len(actual_upserts))) + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) for e, a in zip(expected_upserts, actual_upserts): self.assertEqualUpsert(e, a) - elif key == 'writeErrors': + elif key == "writeErrors": expected_errors = value - actual_errors = actual['writeErrors'] + actual_errors = actual["writeErrors"] self.assertEqual( - len(expected_errors), len(actual_errors), - 'Expected %d elements in "writeErrors", got %d' % ( - len(expected_errors), len(actual_errors))) + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) for e, a in zip(expected_errors, actual_errors): self.assertEqualWriteError(e, a) else: self.assertEqual( - actual.get(key), value, - '%r value of %r does not match expected %r' % - (key, actual.get(key), value)) + actual.get(key), + value, + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", + ) def assertEqualUpsert(self, expected, actual): """Compare bulk.execute()['upserts'] to expected value. 
Like: {'index': 0, '_id': ObjectId()} """ - self.assertEqual(expected['index'], actual['index']) - if expected['_id'] == '...': + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": # Unspecified value. - self.assertTrue('_id' in actual) + self.assertTrue("_id" in actual) else: - self.assertEqual(expected['_id'], actual['_id']) + self.assertEqual(expected["_id"], actual["_id"]) def assertEqualWriteError(self, expected, actual): """Compare bulk.execute()['writeErrors'] to expected value. Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} """ - self.assertEqual(expected['index'], actual['index']) - self.assertEqual(expected['code'], actual['code']) - if expected['errmsg'] == '...': + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue('errmsg' in actual) + self.assertTrue("errmsg" in actual) else: - self.assertEqual(expected['errmsg'], actual['errmsg']) + self.assertEqual(expected["errmsg"], actual["errmsg"]) - expected_op = expected['op'].copy() - actual_op = actual['op'].copy() - if expected_op.get('_id') == '...': + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": # Unspecified _id. - self.assertTrue('_id' in actual_op) - actual_op.pop('_id') - expected_op.pop('_id') + self.assertTrue("_id" in actual_op) + actual_op.pop("_id") + expected_op.pop("_id") self.assertEqual(expected_op, actual_op) class TestBulk(BulkTestBase): - def test_empty(self): self.assertRaises(InvalidOperation, self.coll.bulk_write, []) def test_insert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([InsertOne({})]) @@ -142,14 +157,14 @@ def test_insert(self): def _test_update_many(self, update): expected = { - 'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -159,47 +174,33 @@ def _test_update_many(self, update): self.assertTrue(result.modified_count in (2, None)) def test_update_many(self): - self._test_update_many({'$set': {'foo': 'bar'}}) + self._test_update_many({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_many_pipeline(self): - self._test_update_many([{'$set': {'foo': 'bar'}}]) - - @client_context.require_version_max(3, 5, 5) - def test_array_filters_unsupported(self): - requests = [ - UpdateMany( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]), - UpdateOne( - {}, {'$set': {"y.$[i].b": 2}}, array_filters=[{'i.b': 3}]) - ] - for bulk_op in requests: - self.assertRaises( - ConfigurationError, self.coll.bulk_write, [bulk_op]) + self._test_update_many([{"$set": {"foo": "bar"}}]) def test_array_filters_validation(self): self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) self.assertRaises(TypeError, UpdateOne, {}, {}, array_filters={}) def test_array_filters_unacknowledged(self): - coll = 
self.coll.with_options(write_concern=WriteConcern(w=0)) - update_one = UpdateOne( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - update_many = UpdateMany( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) + coll = self.coll_w0 + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_one]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_many]) def _test_update_one(self, update): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -210,28 +211,28 @@ def _test_update_one(self, update): self.assertTrue(result.modified_count in (1, None)) def test_update_one(self): - self._test_update_one({'$set': {'foo': 'bar'}}) + self._test_update_one({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_one_pipeline(self): - self._test_update_one([{'$set': {'foo': 'bar'}}]) + self._test_update_one([{"$set": {"foo": "bar"}}]) def test_replace_one(self): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) - result = self.coll.bulk_write([ReplaceOne({}, {'foo': 'bar'})]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (1, None)) @@ -239,14 +240,14 @@ def test_replace_one(self): def test_remove(self): # Test removing all documents, ordered. expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -258,14 +259,14 @@ def test_remove_one(self): # Test removing one document, empty selector. 
self.coll.insert_many([{}, {}]) expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([DeleteOne({})]) @@ -276,28 +277,27 @@ def test_remove_one(self): def test_upsert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}] + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], } - result = self.coll.bulk_write([ReplaceOne({}, - {'foo': 'bar'}, - upsert=True)]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) - self.assertEqual(self.coll.count_documents({'foo': 'bar'}), 1) + self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. - n_docs = 2100 - requests = [InsertOne({}) for _ in range(n_docs)] + # Ensure we don't exceed server's maxWriteBatchSize size limit. + n_docs = client_context.max_write_batch_size + 100 + requests = [InsertOne[dict]({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) @@ -308,7 +308,6 @@ def test_numerous_inserts(self): self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) - @client_context.require_version_min(3, 6) def test_bulk_max_message_size(self): self.coll.delete_many({}) self.addCleanup(self.coll.delete_many, {}) @@ -316,31 +315,29 @@ def test_bulk_max_message_size(self): # Generate a list of documents such that the first batched OP_MSG is # as close as possible to the 48MB limit. docs = [ - {'_id': 1, 'l': 's' * _16_MB}, - {'_id': 2, 'l': 's' * _16_MB}, - {'_id': 3, 'l': 's' * (_16_MB - 10000)}, + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, ] # Fill in the remaining ~10000 bytes with small documents. 
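Aside: the upsert bookkeeping these tests assert on can be seen directly against a live collection. A sketch (assumes a reachable `mongod` on the default URI; database and collection names are illustrative):

```python
from pymongo import MongoClient, UpdateOne

coll = MongoClient().pymongo_test.bulk_demo
coll.drop()

result = coll.bulk_write([UpdateOne({"x": 1}, {"$set": {"y": 2}}, upsert=True)])
assert result.upserted_count == 1
# upserted_ids maps each request's index to its server-generated _id.
assert 0 in result.upserted_ids
```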
for i in range(4, 10000): - docs.append({'_id': i}) + docs.append({"_id": i}) result = self.coll.insert_many(docs) self.assertEqual(len(docs), len(result.inserted_ids)) def test_generator_insert(self): def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} result = self.coll.insert_many(gen()) self.assertEqual(5, len(result.inserted_ids)) def test_bulk_write_no_results(self): - - coll = self.coll.with_options(write_concern=WriteConcern(w=0)) - result = coll.bulk_write([InsertOne({})]) + result = self.coll_w0.bulk_write([InsertOne({})]) self.assertFalse(result.acknowledged) self.assertRaises(InvalidOperation, lambda: result.inserted_count) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -351,68 +348,749 @@ def test_bulk_write_no_results(self): def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. - generator = (InsertOne({}) for _ in range(10)) + generator = (InsertOne[dict]({}) for _ in range(10)) with self.assertRaises(TypeError): - self.coll.bulk_write(generator) + self.coll.bulk_write(generator) # type: ignore[arg-type] # Document is not wrapped in a bulk write operation. with self.assertRaises(TypeError): - self.coll.bulk_write([{}]) + self.coll.bulk_write([{}]) # type: ignore[list-item] + + def test_upsert_large(self): + big = "a" * (client_context.max_bson_size - 37) + result = self.coll.bulk_write([UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)]) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, self.coll.count_documents({"x": 1})) + + def test_client_generated_upsert_id(self): + result = self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] + + result = coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) + + # The `Binary` values are returned as `bytes` objects. + for _id in ids: + _id["f"] = bytes(_id["f"]) + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) + + def test_single_ordered_batch(self): + result = self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) + + def test_single_error_ordered_batch(self): + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + def test_multiple_error_ordered_batch(self): + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, 
{"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), + ] + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + def test_single_unordered_batch(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + result = self.coll.bulk_write(requests, ordered=False) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) + + def test_single_error_unordered_batch(self): + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] -class BulkAuthorizationTestBase(BulkTestBase): + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + def test_multiple_error_unordered_batch(self): + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), + ] + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. 
+ self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) + + def test_large_inserts_ordered(self): + big = "x" * client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(1, result["nInserted"]) + + self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + write_result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) + + self.assertEqual(6, write_result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) + + def test_large_inserts_unordered(self): + big = "x" * client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + + self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) + + +class BulkAuthorizationTestBase(BulkTestBase): @classmethod @client_context.require_auth + @client_context.require_no_api_version def setUpClass(cls): - super(BulkAuthorizationTestBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(BulkAuthorizationTestBase, self).setUp() - client_context.create_user( - self.db.name, 'readonly', 'pw', ['read']) + super().setUp() + client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( - 'createRole', 'noremove', - privileges=[{ - 'actions': ['insert', 'update', 'find'], - 'resource': {'db': 'pymongo_test', 'collection': 'test'} - }], - roles=[]) - - client_context.create_user(self.db.name, 'noremove', 'pw', ['noremove']) + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) def tearDown(self): - self.db.command('dropRole', 'noremove') + self.db.command("dropRole", "noremove") remove_all_users(self.db) -class TestBulkAuthorization(BulkAuthorizationTestBase): +class TestBulkUnacknowledged(BulkTestBase): + def tearDown(self): + self.coll.delete_many({}) + def test_no_results_ordered_success(self): + requests: list = [ + 
InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def test_no_results_ordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should not be executed since the batch is ordered. + DeleteOne({"_id": 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + wait_until(lambda: self.coll.count_documents({}) == 3, "insert 3 documents") + self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) + + def test_no_results_unordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def test_no_results_unordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should be executed since the batch is unordered. + DeleteOne({"_id": 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + +class TestBulkAuthorization(BulkAuthorizationTestBase): def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='readonly', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() - self.assertRaises(OperationFailure, coll.bulk_write, - [InsertOne({'x': 1})]) + self.assertRaises(OperationFailure, coll.bulk_write, [InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='noremove', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() requests = [ - InsertOne({'x': 1}), - ReplaceOne({'x': 2}, {'x': 2}, upsert=True), - DeleteMany({}), # Prohibited. - InsertOne({'x': 3}), # Never attempted. + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. 
] self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) + self.assertEqual({1, 2}, set(self.coll.distinct("x"))) + + +class TestBulkWriteConcern(BulkTestBase): + w: Optional[int] + secondary: MongoClient + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.w = client_context.w + cls.secondary = None + if cls.w is not None and cls.w > 1: + for member in client_context.hello["hosts"]: + if member != client_context.hello["primary"]: + cls.secondary = single_client(*partition_node(member)) + break + + @classmethod + def tearDownClass(cls): + if cls.secondary: + cls.secondary.close() + + def cause_wtimeout(self, requests, ordered): + if not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled.") + + # Use the rsSyncApplyStop failpoint to pause replication on a + # secondary which will cause a wtimeout error. + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn") + + try: + coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1)) + return coll.bulk_write(requests, ordered=ordered) + finally: + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") + + @client_context.require_replica_set + @client_context.require_secondaries_count(1) + def test_write_concern_failure_ordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})]) + self.assertTrue(result.acknowledged) + + requests = [InsertOne({"a": 1}), InsertOne({"a": 2})] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + }, + details, + ) + + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 0) + + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + + self.coll.delete_many({}) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + + # Fail due to write concern support as well + # as duplicate key error on ordered batch. 
+ requests = [ + InsertOne({"a": 1}), + ReplaceOne({"a": 3}, {"b": 1}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), + ] + try: + self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 1, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}], + "writeErrors": [ + {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}} + ], + }, + details, + ) + + self.assertTrue(len(details["writeConcernErrors"]) > 1) + failed = details["writeErrors"][0] + self.assertTrue("duplicate" in failed["errmsg"]) + + @client_context.require_replica_set + @client_context.require_secondaries_count(1) + def test_write_concern_failure_unordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})], ordered=False) + self.assertTrue(result.acknowledged) + + requests = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 2}), + ] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(0, len(details["writeErrors"])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 1) + + self.coll.delete_many({}) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + + # Fail due to write concern support as well + # as duplicate key error on unordered batch. + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), + ] + try: + self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(1, len(details["writeErrors"])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 1) + + failed = details["writeErrors"][0] + self.assertEqual(2, failed["index"]) + self.assertEqual(11000, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertEqual(1, failed["op"]["a"]) + + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + + upserts = details["upserted"] + self.assertEqual(1, len(upserts)) + self.assertEqual(1, upserts[0]["index"]) + self.assertTrue(upserts[0].get("_id")) + if __name__ == "__main__": unittest.main() diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 97b67fb69d..515ee436c1 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -13,44 +13,47 @@ # limitations under the License. 
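For reference, the rsSyncApplyStop failpoint pattern used by cause_wtimeout above can be sketched on its own. This is illustrative only, not part of this patch; it assumes `client` is connected to a two-member replica set with test commands enabled and `secondary` is a direct connection to the secondary:

    from pymongo.errors import WTimeoutError
    from pymongo.write_concern import WriteConcern

    # Pause replication on the secondary so w=2 cannot be satisfied in time.
    secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn")
    try:
        coll = client.test.test.with_options(write_concern=WriteConcern(w=2, wtimeout=1))
        coll.insert_one({"x": 1})  # Expected to time out waiting for replication.
    except WTimeoutError:
        pass  # The write itself still happened; only the write concern failed.
    finally:
        secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off")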
"""Test the change_stream module.""" +from __future__ import annotations -import random import os -import re -import sys +import random import string +import sys import threading import time import uuid - -from contextlib import contextmanager from itertools import product +from typing import no_type_check -sys.path[0:0] = [''] +sys.path[0:0] = [""] -from bson import ObjectId, SON, Timestamp, encode, json_util -from bson.binary import (ALL_UUID_REPRESENTATIONS, - Binary, - STANDARD, - PYTHON_LEGACY) -from bson.py3compat import iteritems -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from test import IntegrationTest, Version, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + EventListener, + rs_or_single_client, + wait_until, +) +from bson import SON, ObjectId, Timestamp, encode +from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient -from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS from pymongo.command_cursor import CommandCursor -from pymongo.errors import (InvalidOperation, OperationFailure, - ServerSelectionTimeoutError) +from pymongo.errors import ( + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, +) from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import ( - EventListener, WhiteListEventListener, rs_or_single_client, wait_until) - class TestChangeStreamBase(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + def change_stream_with_client(self, client, *args, **kwargs): """Create a change stream using the given client and return it.""" raise NotImplementedError @@ -60,8 +63,8 @@ def change_stream(self, *args, **kwargs): return self.change_stream_with_client(self.client, *args, **kwargs) def client_with_listener(self, *commands): - """Return a client with a WhiteListEventListener.""" - listener = WhiteListEventListener(*commands) + """Return a client with a AllowListEventListener.""" + listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener @@ -69,7 +72,7 @@ def client_with_listener(self, *commands): def watched_collection(self, *args, **kwargs): """Return a collection that is watched by self.change_stream().""" # Construct a unique collection for each test. - collname = '.'.join(self.id().rsplit('.', 2)[1:]) + collname = ".".join(self.id().rsplit(".", 2)[1:]) return self.db.get_collection(collname, *args, **kwargs) def generate_invalidate_event(self, change_stream): @@ -80,31 +83,30 @@ def generate_unique_collnames(self, numcolls): """Generate numcolls collection names unique to a test.""" collnames = [] for idx in range(1, numcolls + 1): - collnames.append(self.id() + '_' + str(idx)) + collnames.append(self.id() + "_" + str(idx)) return collnames def get_resume_token(self, invalidate=False): """Get a resume token to use for starting a change stream.""" # Ensure targeted collection exists before starting. 
- coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) coll.insert_one({}) if invalidate: - with self.change_stream( - [{'$match': {'operationType': 'invalidate'}}]) as cs: + with self.change_stream([{"$match": {"operationType": "invalidate"}}]) as cs: if isinstance(cs._target, MongoClient): - self.skipTest( - "cluster-level change streams cannot be invalidated") + self.skipTest("cluster-level change streams cannot be invalidated") self.generate_invalidate_event(cs) - return cs.next()['_id'] + return cs.next()["_id"] else: with self.change_stream() as cs: - coll.insert_one({'data': 1}) - return cs.next()['_id'] + coll.insert_one({"data": 1}) + return cs.next()["_id"] def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most - recently returned timestamp.""" + recently returned timestamp. + """ optime = self.client.admin.command("ping")["operationTime"] return Timestamp(optime.time, optime.inc + 1) @@ -120,22 +122,22 @@ def kill_change_stream_cursor(self, change_stream): client._close_cursor_now(cursor.cursor_id, address) -class APITestsMixin(object): +class APITestsMixin: + @no_type_check def test_watch(self): with self.change_stream( - [{'$project': {'foo': 0}}], full_document='updateLookup', - max_await_time_ms=1000, batch_size=100) as change_stream: - self.assertEqual([{'$project': {'foo': 0}}], - change_stream._pipeline) - self.assertEqual('updateLookup', change_stream._full_document) - self.assertIsNone(change_stream.resume_token) + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) - self.assertEqual( - 1000, change_stream._cursor._CommandCursor__max_await_time_ms) - self.watched_collection( - write_concern=WriteConcern("majority")).insert_one({}) + self.assertEqual(1000, change_stream._cursor._CommandCursor__max_await_time_ms) + self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) _ = change_stream.next() resume_token = change_stream.resume_token with self.assertRaises(TypeError): @@ -146,158 +148,151 @@ def test_watch(self): with self.change_stream(resume_after=resume_token): pass + @no_type_check def test_try_next(self): # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() coll.insert_one({}) self.addCleanup(coll.drop) with self.change_stream(max_await_time_ms=250) as stream: - self.assertIsNone(stream.try_next()) # No changes initially. - coll.insert_one({}) # Generate a change. + self.assertIsNone(stream.try_next()) # No changes initially. + coll.insert_one({}) # Generate a change. # On sharded clusters, even majority-committed changes only show # up once an event that sorts after it shows up on the other # shard. So, we wait on try_next to eventually return changes. 
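That wait is the usual application-side shape of a try_next polling loop. A minimal sketch (illustrative only, not part of this patch; it assumes `coll` is the watched collection):

    with coll.watch(max_await_time_ms=250) as stream:
        while True:
            change = stream.try_next()  # Returns None when no change is available yet.
            if change is not None:
                print(change["operationType"])
                break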
- wait_until(lambda: stream.try_next() is not None, - "get change from try_next") + wait_until(lambda: stream.try_next() is not None, "get change from try_next") + @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') - listener.results.clear() + client.admin.command("ping") + listener.reset() # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) - with self.change_stream_with_client( - client, max_await_time_ms=250) as stream: + with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) - listener.results.clear() + listener.reset() # Confirm that only a single getMore is run even when no documents # are returned. self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() # Get at least one change before resuming. - coll.insert_one({'_id': 2}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") - listener.results.clear() + coll.insert_one({"_id": 2}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") + listener.reset() # Cause the next request to initiate the resume process. self.kill_change_stream_cursor(stream) - listener.results.clear() + listener.reset() # The sequence should be: # - getMore, fail # - resume with aggregate command # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) - self.assertEqual( - listener.started_command_names(), ["getMore", "aggregate"]) - listener.results.clear() + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) + listener.reset() # Stream still works after a resume. - coll.insert_one({'_id': 3}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") - self.assertEqual(set(listener.started_command_names()), - set(["getMore"])) + coll.insert_one({"_id": 3}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) self.assertIsNone(stream.try_next()) + @no_type_check def test_batch_size_is_honored(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') - listener.results.clear() + client.admin.command("ping") + listener.reset() # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) # Expected batchSize. 
- expected = {'batchSize': 23} - with self.change_stream_with_client( - client, max_await_time_ms=250, batch_size=23) as stream: + expected = {"batchSize": 23} + with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results['started'][0].command - self.assertEqual(cmd['cursor'], expected) - listener.results.clear() + cmd = listener.started_events[0].command + self.assertEqual(cmd["cursor"], expected) + listener.reset() # Confirm that batchSize is honored by getMores. self.assertIsNone(stream.try_next()) - cmd = listener.results['started'][0].command + cmd = listener.started_events[0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check @client_context.require_version_min(4, 0, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() - coll = self.watched_collection( - write_concern=WriteConcern("majority")) + coll = self.watched_collection(write_concern=WriteConcern("majority")) ndocs = 3 coll.insert_many([{"data": i} for i in range(ndocs)]) with self.change_stream(start_at_operation_time=optime) as cs: - for i in range(ndocs): + for _i in range(ndocs): cs.next() + @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") - results = listener.results - with self.change_stream_with_client( - client, [{'$project': {'foo': 0}}]) as _: + with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results['started'])) - command = results['started'][0] - self.assertEqual('aggregate', command.command_name) - self.assertEqual([ - {'$changeStream': expected_cs_stage}, - {'$project': {'foo': 0}}], - command.command['pipeline']) + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) + @no_type_check def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ self._test_full_pipeline({}) + @no_type_check def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 - self.watched_collection().insert_many( - [{} for _ in range(num_inserted)]) + self.watched_collection().insert_many([{} for _ in range(num_inserted)]) inserts_received = 0 for change in change_stream: - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") inserts_received += 1 if inserts_received == num_inserted: break self._test_invalidate_stops_iteration(change_stream) + @no_type_check def _test_next_blocks(self, change_stream): - inserted_doc = {'_id': ObjectId()} + inserted_doc = {"_id": ObjectId()} changes = [] - t = threading.Thread( - target=lambda: changes.append(change_stream.next())) + t = threading.Thread(target=lambda: changes.append(change_stream.next())) t.start() # Sleep for a bit to prove that the call to next() blocks. 
time.sleep(1) @@ -309,28 +304,34 @@ def _test_next_blocks(self, change_stream): t.join(30) self.assertFalse(t.is_alive()) self.assertEqual(1, len(changes)) - self.assertEqual(changes[0]['operationType'], 'insert') - self.assertEqual(changes[0]['fullDocument'], inserted_doc) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) + @no_type_check def test_next_blocks(self): """Test that next blocks until a change is readable""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( - [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: + def iterate_cursor(): for _ in change_stream: pass + t = threading.Thread(target=iterate_cursor) t.start() self.watched_collection().insert_one({}) @@ -339,56 +340,62 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) + @no_type_check def test_unknown_full_document(self): - """Must rely on the server to raise an error on unknown fullDocument. - """ + """Must rely on the server to raise an error on unknown fullDocument.""" try: - with self.change_stream(full_document='notValidatedByPyMongo'): + with self.change_stream(full_document="notValidatedByPyMongo"): pass except OperationFailure: pass + @no_type_check def test_change_operations(self): """Test each operation type.""" - expected_ns = {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name} + expected_ns = { + "db": self.watched_collection().database.name, + "coll": self.watched_collection().name, + } with self.change_stream() as change_stream: # Insert. - inserted_doc = {'_id': ObjectId(), 'foo': 'bar'} + inserted_doc = {"_id": ObjectId(), "foo": "bar"} self.watched_collection().insert_one(inserted_doc) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Update. 
- update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}} + update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}} self.watched_collection().update_one(inserted_doc, update_spec) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'update') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) - self.assertEqual({'updatedFields': {'new': 1}, - 'removedFields': ['foo']}, - change['updateDescription']) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "update") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + + expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} + if client_context.version.at_least(4, 5, 0): + expected_update_description["truncatedArrays"] = [] + self.assertEqual(expected_update_description, change["updateDescription"]) # Replace. - self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'}) + self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'replace') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "replace") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Delete. - self.watched_collection().delete_one({'foo': 'bar'}) + self.watched_collection().delete_one({"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'delete') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "delete") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) # Invalidate. self._test_get_invalidate_event(change_stream) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) @@ -399,91 +406,93 @@ def test_start_after(self): # start_after can resume after invalidate. 
with self.change_stream(start_after=resume_token) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 3}) + self.watched_collection().insert_one({"_id": 3}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 3}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 3}) - @client_context.require_no_mongos # Remove after SERVER-41196 - @client_context.require_version_min(4, 1, 1) + @no_type_check + @client_context.require_version_min(4, 2) def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) -class ProseSpecTestsMixin(object): +class ProseSpecTestsMixin: + @no_type_check def _client_with_listener(self, *commands): - listener = WhiteListEventListener(*commands) + listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener + @no_type_check def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): - self.watched_collection().insert_many( - [{"data": k} for k in range(batch_size)]) + self.watched_collection().insert_many([{"data": k} for k in range(batch_size)]) for _ in range(batch_size): change = next(change_stream) return change - def _get_expected_resume_token_legacy(self, stream, - listener, previous_change=None): + def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. 
Assumes the stream - has never returned any changes if previous_change is None.""" + has never returned any changes if previous_change is None. + """ if previous_change is None: - agg_cmd = listener.results['started'][0] + agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") - return previous_change['_id'] + return previous_change["_id"] - def _get_expected_resume_token(self, stream, listener, - previous_change=None): + def _get_expected_resume_token(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes - listener is a WhiteListEventListener that listens for aggregate and - getMore commands.""" + listener is an AllowListEventListener that listens for aggregate and + getMore commands. + """ if previous_change is None or stream._cursor._has_next(): - return self._get_expected_resume_token_legacy( - stream, listener, previous_change) + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) + if token is not None: + return token - response = listener.results['succeeded'][-1].reply - return response['cursor']['postBatchResumeToken'] + response = listener.succeeded_events[-1].reply + return response["cursor"]["postBatchResumeToken"] + @no_type_check def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ - with self.change_stream([{'$project': {'_id': 0}}]) as change_stream: + with self.change_stream([{"$project": {"_id": 0}}]) as change_stream: self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) @@ -491,20 +500,21 @@ def _test_raises_error_on_missing_id(self, expected_exception): with self.assertRaises(StopIteration): next(change_stream) + @no_type_check def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") - coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) with self.change_stream_with_client(client) as change_stream: self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener)) + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) for _ in range(3): coll.insert_one({}) change = next(change_stream) self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener, change)) + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) # Prose test no. 1 @client_context.require_version_min(4, 0, 7) @@ -529,19 +539,20 @@ def test_raises_error_on_missing_id_418minus(self): self._test_raises_error_on_missing_id(InvalidOperation) # Prose test no. 3 + @no_type_check def test_resume_on_error(self): with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. self.kill_change_stream_cursor(change_stream) - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 
4 + @no_type_check @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. - fail_point = {'mode': {'times': 1}, - 'data': {'errorCode': 2, 'failCommands': ['aggregate']}} + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} client, listener = self._client_with_listener("aggregate", "getMore") with self.fail_point(fail_point): try: @@ -550,52 +561,17 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results['started']), 1) - self.assertEqual(listener.results['started'][0].command_name, - 'aggregate') - - # Prose test no. 5 - def test_does_not_resume_fatal_errors(self): - """ChangeStream will not attempt to resume fatal server errors.""" - if client_context.supports_failCommand_fail_point: - # failCommand does not support returning no errorCode. - TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS - {None} - @contextmanager - def generate_error(change_stream, code): - fail_point = {'mode': {'times': 1}, 'data': { - 'errorCode': code, 'failCommands': ['getMore']}} - with self.fail_point(fail_point): - yield - else: - TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS - @contextmanager - def generate_error(change_stream, code): - def mock_try_next(*args, **kwargs): - change_stream._cursor.close() - raise OperationFailure('Mock server error', code=code) - - original_try_next = change_stream._cursor._try_next - change_stream._cursor._try_next = mock_try_next - try: - yield - finally: - change_stream._cursor._try_next = original_try_next - - for code in TEST_ERROR_CODES: - with self.change_stream() as change_stream: - self.watched_collection().insert_one({}) - with generate_error(change_stream, code): - with self.assertRaises(OperationFailure): - next(change_stream) - with self.assertRaises(StopIteration): - next(change_stream) + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") + # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED - # readPreference is not configurable using the watch() helpers so we can - # skip this test. Also, PyMongo performs server selection for each - # operation which ensure compliance with this prose test. + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensures compliance with this prose test. # Prose test no. 7 + @no_type_check def test_initial_empty_batch(self): with self.change_stream() as change_stream: # The first batch should be empty. @@ -607,18 +583,21 @@ def test_initial_empty_batch(self): self.assertEqual(cursor_id, change_stream._cursor.cursor_id) # Prose test no. 8 + @no_type_check def test_kill_cursors(self): def raise_error(): - raise ServerSelectionTimeoutError('mock error') + raise ServerSelectionTimeoutError("mock error") + with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor self.kill_change_stream_cursor(change_stream) cursor.close = raise_error - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 
9 + @no_type_check @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 0, 7) def test_start_at_operation_time_caching(self): @@ -627,26 +606,27 @@ def test_start_at_operation_time_caching(self): with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime")) + cmd = listener.started_events[-1].command + self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) # Case 2: change stream started with startAtOperationTime - listener.results.clear() + listener.reset() optime = self.get_start_at_operation_time() - with self.change_stream_with_client( - client, start_at_operation_time=optime) as cs: + with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertEqual(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime"), optime, str([k.command for k in - listener.results['started']])) + cmd = listener.started_events[-1].command + self.assertEqual( + cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), + optime, + str([k.command for k in listener.started_events]), + ) # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") @@ -654,11 +634,11 @@ def test_resumetoken_empty_batch(self): self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results['succeeded'][0].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.succeeded_events[0].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") @@ -666,11 +646,11 @@ def test_resumetoken_exhausted_batch(self): self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results['succeeded'][-1].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.succeeded_events[-1].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_empty_batch_legacy(self): resume_point = self.get_resume_token() @@ -687,13 +667,14 @@ def test_resumetoken_empty_batch_legacy(self): self.assertEqual(resume_token, resume_point) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_exhausted_batch_legacy(self): # Resume token is _id of last change. with self.change_stream() as change_stream: change = self._populate_and_exhaust_change_stream(change_stream) self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change['_id'] + resume_point = change["_id"] # Resume token is _id of last change even if resumeAfter is specified. 
with self.change_stream(resume_after=resume_point) as change_stream: @@ -701,103 +682,127 @@ def test_resumetoken_exhausted_batch_legacy(self): self.assertEqual(change_stream.resume_token, change["_id"]) # Prose test no. 13 + @no_type_check def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. with self.change_stream() as change_stream: - self.watched_collection( - write_concern=WriteConcern('majority')).insert_many( - [{"data": k} for k in range(3)]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"data": k} for k in range(3)] + ) for _ in range(2): change = next(change_stream) resume_token = change_stream.resume_token self.assertEqual(resume_token, change["_id"]) + @no_type_check def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): # When the batch is not empty and hasn't been iterated at all. # Resume token should be same as the resume option used. resume_point = self.get_resume_token() # Insert some documents so that firstBatch isn't empty. - self.watched_collection( - write_concern=WriteConcern("majority")).insert_many( - [{'a': 1}, {'b': 2}, {'c': 3}]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"a": 1}, {"b": 2}, {"c": 3}] + ) # Resume token should be same as the resume option. - with self.change_stream( - **{resume_option: resume_point}) as change_stream: + with self.change_stream(**{resume_option: resume_point}) as change_stream: self.assertTrue(change_stream._cursor._has_next()) resume_token = change_stream.resume_token self.assertEqual(resume_token, resume_point) # Prose test no. 14 + @no_type_check @client_context.require_no_mongos def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): self._test_resumetoken_uniterated_nonempty_batch("resume_after") # Prose test no. 14 + @no_type_check @client_context.require_no_mongos @client_context.require_version_min(4, 1, 1) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes - change_stream.try_next() # No changes + change_stream.try_next() # No changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt + change_stream.try_next() # Resume attempt - response = listener.results['started'][-1] - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + response = listener.started_events[-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 
18 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes self.watched_collection().insert_one({}) - next(change_stream) # Changes + next(change_stream) # Changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt - - response = listener.results['started'][-1] - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 19 + @no_type_check + def test_split_large_change(self): + server_version = client_context.version + if not server_version.at_least(6, 0, 9): + self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+") + if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0): + self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases") + self.db.drop_collection("test_split_large_change") + coll = self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + with coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): + dbs: list + @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStream, cls).setUpClass() + super().setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @classmethod def tearDownClass(cls): for db in cls.dbs: cls.client.drop_database(db) - super(TestClusterChangeStream, cls).tearDownClass() + super().tearDownClass() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -817,10 +822,9 @@ def _insert_and_check(self, change_stream, db, collname, doc): coll = db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, 
doc): db = random.choice(self.dbs) @@ -831,31 +835,28 @@ def test_simple(self): collnames = self.generate_unique_collnames(3) with self.change_stream() as change_stream: for db, collname in product(self.dbs, collnames): - self._insert_and_check( - change_stream, db, collname, {'_id': collname} - ) + self._insert_and_check(change_stream, db, collname, {"_id": collname}) def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( - [{'$changeStream': {'allChangesForCluster': True}}], - maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ - self._test_full_pipeline({'allChangesForCluster': True}) + self._test_full_pipeline({"allChangesForCluster": True}) class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStream, cls).setUpClass() + super().setUpClass() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -872,22 +873,22 @@ def _test_get_invalidate_event(self, change_stream): change = change_stream.next() # 4.1+ returns "drop" events for each collection in dropped database # and a "dropDatabase" event for the database itself. - if change['operationType'] == 'drop': - self.assertTrue(change['_id']) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) for _ in range(len(dropped_colls)): - ns = change['ns'] - self.assertEqual(ns['db'], change_stream._target.name) - self.assertIn(ns['coll'], dropped_colls) + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) change = change_stream.next() - self.assertEqual(change['operationType'], 'dropDatabase') - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], {'db': change_stream._target.name}) + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) # Get next change. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() @@ -897,10 +898,9 @@ def _test_invalidate_stops_iteration(self, change_stream): change_stream._client.drop_database(self.db.name) # Check drop and dropDatabase events. for change in change_stream: - self.assertIn(change['operationType'], ( - 'drop', 'dropDatabase', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. 
with self.assertRaises(StopIteration): change_stream.next() @@ -911,10 +911,9 @@ def _insert_and_check(self, change_stream, collname, doc): coll = self.db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': self.db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, doc): self._insert_and_check(change_stream, self.id(), doc) @@ -924,29 +923,25 @@ def test_simple(self): with self.change_stream() as change_stream: for collname in collnames: self._insert_and_check( - change_stream, collname, {'_id': uuid.uuid4()}) + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) def test_isolation(self): # Ensure inserts to other dbs don't show up in our ChangeStream. other_db = self.client.pymongo_test_temp - self.assertNotEqual( - other_db, self.db, msg="Isolation must be tested on separate DBs") + self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs") collname = self.id() with self.change_stream() as change_stream: - other_db[collname].insert_one({'_id': uuid.uuid4()}) - self._insert_and_check( - change_stream, collname, {'_id': uuid.uuid4()}) + other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())}) + self._insert_and_check(change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())}) self.client.drop_database(other_db) -class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, - ProseSpecTestsMixin): +class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod - @client_context.require_version_min(3, 5, 11) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStream, cls).setUpClass() + super().setUpClass() def setUp(self): # Use a new collection for each test. @@ -954,8 +949,11 @@ def setUp(self): self.watched_collection().insert_one({}) def change_stream_with_client(self, client, *args, **kwargs): - return client[self.db.name].get_collection( - self.watched_collection().name).watch(*args, **kwargs) + return ( + client[self.db.name] + .get_collection(self.watched_collection().name) + .watch(*args, **kwargs) + ) def generate_invalidate_event(self, change_stream): # Dropping the collection invalidates the change stream. @@ -965,9 +963,9 @@ def _test_invalidate_stops_iteration(self, change_stream): self.generate_invalidate_event(change_stream) # Check drop and invalidate events. for change in change_stream: - self.assertIn(change['operationType'], ('drop', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() @@ -979,17 +977,18 @@ def _test_get_invalidate_event(self, change_stream): change_stream._target.drop() change = change_stream.next() # 4.1+ returns a "drop" change document.
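# How consumer code typically reacts to the drop -> invalidate sequence
# asserted by these tests; a sketch, with an illustrative collection name:
def _example_consume_until_invalidate(db):
    with db.my_coll.watch() as stream:
        for change in stream:
            if change["operationType"] == "invalidate":
                # The stream is dead from here on; any further next()
                # raises StopIteration, matching the assertions above.
                break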
- if change['operationType'] == 'drop': - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], { - 'db': change_stream._target.database.name, - 'coll': change_stream._target.name}) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + self.assertEqual( + change["ns"], + {"db": change_stream._target.database.name, "coll": change_stream._target.name}, + ) # Last change should be invalidate. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() @@ -997,38 +996,36 @@ def _test_get_invalidate_event(self, change_stream): def insert_one_and_check(self, change_stream, doc): self.watched_collection().insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") self.assertEqual( - change['ns'], {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name}) - self.assertEqual(change['fullDocument'], doc) + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) def test_raw(self): """Test with RawBSONDocument.""" - raw_coll = self.watched_collection( - codec_options=DEFAULT_RAW_BSON_OPTIONS) + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) with raw_coll.watch() as change_stream: - raw_doc = RawBSONDocument(encode({'_id': 1})) + raw_doc = RawBSONDocument(encode({"_id": 1})) self.watched_collection().insert_one(raw_doc) change = next(change_stream) self.assertIsInstance(change, RawBSONDocument) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual( - change['ns']['db'], self.watched_collection().database.name) - self.assertEqual( - change['ns']['coll'], self.watched_collection().name) - self.assertEqual(change['fullDocument'], raw_doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" for uuid_representation in ALL_UUID_REPRESENTATIONS: for id_subtype in (STANDARD, PYTHON_LEGACY): options = self.watched_collection().codec_options.with_options( - uuid_representation=uuid_representation) + uuid_representation=uuid_representation + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: - coll.insert_one( - {'_id': Binary(uuid.uuid4().bytes, id_subtype)}) + coll.insert_one({"_id": Binary(uuid.uuid4().bytes, id_subtype)}) _ = change_stream.next() resume_token = change_stream.resume_token @@ -1037,12 +1034,12 @@ def test_uuid_representations(self): def test_document_id_order(self): """Test with document _ids that need their order preserved.""" - random_keys = random.sample(string.ascii_letters, - len(string.ascii_letters)) - random_doc = {'_id': SON([(key, key) for key in random_keys])} + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + 
random_doc = {"_id": SON([(key, key) for key in random_keys])} for document_class in (dict, SON, RawBSONDocument): options = self.watched_collection().codec_options.with_options( - document_class=document_class) + document_class=document_class + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: coll.insert_one(random_doc) @@ -1058,205 +1055,120 @@ def test_document_id_order(self): def test_read_concern(self): """Test readConcern is not validated by the driver.""" # Read concern 'local' is not allowed for $changeStream. - coll = self.watched_collection(read_concern=ReadConcern('local')) + coll = self.watched_collection(read_concern=ReadConcern("local")) with self.assertRaises(OperationFailure): coll.watch() # Does not error. - coll = self.watched_collection(read_concern=ReadConcern('majority')) + coll = self.watched_collection(read_concern=ReadConcern("majority")) with coll.watch(): pass -class TestAllScenarios(unittest.TestCase): +class TestAllLegacyScenarios(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener @classmethod @client_context.require_connection def setUpClass(cls): - cls.listener = WhiteListEventListener("aggregate") + super().setUpClass() + cls.listener = AllowListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): cls.client.close() + super().tearDownClass() def setUp(self): - self.listener.results.clear() + super().setUp() + self.listener.reset() def setUpCluster(self, scenario_dict): assets = [ (scenario_dict["database_name"], scenario_dict["collection_name"]), - (scenario_dict["database2_name"], scenario_dict["collection2_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), ] for db, coll in assets: self.client.drop_database(db) self.client[db].create_collection(coll) + def setFailPoint(self, scenario_dict): + fail_point = scenario_dict.get("failPoint") + if fail_point is None: + return + elif not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + fail_cmd = SON([("configureFailPoint", "failCommand")]) + fail_cmd.update(fail_point) + client_context.client.admin.command(fail_cmd) + self.addCleanup( + client_context.client.admin.command, + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) + + def assert_list_contents_are_subset(self, superlist, sublist): + """Check that each element in sublist is a subset of the corresponding + element in superlist. + """ + self.assertEqual(len(superlist), len(sublist)) + for sup, sub in zip(superlist, sublist): + if isinstance(sub, dict): + self.assert_dict_is_subset(sup, sub) + continue + if isinstance(sub, (list, tuple)): + self.assert_list_contents_are_subset(sup, sub) + continue + self.assertEqual(sup, sub) + + def assert_dict_is_subset(self, superdict, subdict): + """Check that subdict is a subset of superdict.""" + exempt_fields = ["documentKey", "_id", "getMore"] + for key, value in subdict.items(): + if key not in superdict: + self.fail(f"Key {key} not found in {superdict}") + if isinstance(value, dict): + self.assert_dict_is_subset(superdict[key], value) + continue + if isinstance(value, (list, tuple)): + self.assert_list_contents_are_subset(superdict[key], value) + continue + if key in exempt_fields: + # Only check for presence of these exempt fields, but not value. 
+ self.assertIn(key, superdict) + else: + self.assertEqual(superdict[key], value) + + def check_event(self, event, expectation_dict): + if event is None: + self.fail() + for key, value in expectation_dict.items(): + if isinstance(value, dict): + self.assert_dict_is_subset(getattr(event, key), value) + else: + self.assertEqual(getattr(event, key), value) + def tearDown(self): - self.listener.results.clear() + self.listener.reset() -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'change_streams' -) +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() - - -def get_change_stream(client, scenario_def, test): - # Get target namespace on which to instantiate change stream - target = test["target"] - if target == "collection": - db = client.get_database(scenario_def["database_name"]) - cs_target = db.get_collection(scenario_def["collection_name"]) - elif target == "database": - cs_target = client.get_database(scenario_def["database_name"]) - elif target == "client": - cs_target = client - else: - raise ValueError("Invalid target in spec") - - # Construct change stream kwargs dict - cs_pipeline = test["changeStreamPipeline"] - options = test["changeStreamOptions"] - cs_options = {} - for key, value in iteritems(options): - cs_options[camel_to_snake(key)] = value - - # Create and return change stream - return cs_target.watch(pipeline=cs_pipeline, **cs_options) - - -def run_operation(client, operation): - # Apply specified operations - opname = camel_to_snake(operation["name"]) - arguments = operation.get("arguments", {}) - if opname == 'rename': - # Special case for rename operation. - arguments = {'new_name': arguments["to"]} - cmd = getattr(client.get_database( - operation["database"]).get_collection( - operation["collection"]), opname +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, ) - return cmd(**arguments) - - -def assert_dict_is_subset(superdict, subdict): - """Check that subdict is a subset of superdict.""" - exempt_fields = ["documentKey", "_id"] - for key, value in iteritems(subdict): - if key not in superdict: - assert False - if isinstance(value, dict): - assert_dict_is_subset(superdict[key], value) - continue - if key in exempt_fields: - superdict[key] = "42" - assert superdict[key] == value - - -def check_event(event, expectation_dict): - if event is None: - raise AssertionError - for key, value in iteritems(expectation_dict): - if isinstance(value, dict): - assert_dict_is_subset( - getattr(event, key), value - ) - else: - assert getattr(event, key) == value - +) -def create_test(scenario_def, test): - def run_scenario(self): - # Set up - self.setUpCluster(scenario_def) - is_error = test["result"].get("error", False) - try: - with get_change_stream( - self.client, scenario_def, test - ) as change_stream: - for operation in test["operations"]: - # Run specified operations - run_operation(self.client, operation) - num_expected_changes = len(test["result"].get("success", [])) - changes = [ - change_stream.next() for _ in range(num_expected_changes)] - # Run a next() to induce an error if one is expected and - # there are no changes. 
- if is_error and not changes: - change_stream.next() - - except OperationFailure as exc: - if not is_error: - raise - expected_code = test["result"]["error"]["code"] - self.assertEqual(exc.code, expected_code) - else: - # Check for expected output from change streams - for change, expected_changes in zip(changes, test["result"]["success"]): - assert_dict_is_subset(change, expected_changes) - self.assertEqual(len(changes), len(test["result"]["success"])) - - finally: - # Check for expected events - results = self.listener.results - for expectation in test.get("expectations", []): - for idx, (event_type, event_desc) in enumerate(iteritems(expectation)): - results_key = event_type.split("_")[1] - event = results[results_key][idx] if len(results[results_key]) > idx else None - check_event(event, event_desc) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): - dirname = os.path.split(dirpath)[-1] - - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - - test_type = os.path.splitext(filename)[0] - - for test in scenario_def['tests']: - new_test = create_test(scenario_def, test) - new_test = client_context.require_no_mmap(new_test) - - if 'minServerVersion' in test: - min_ver = tuple( - int(elt) for - elt in test['minServerVersion'].split('.')) - new_test = client_context.require_version_min(*min_ver)( - new_test) - if 'maxServerVersion' in test: - max_ver = tuple( - int(elt) for - elt in test['maxServerVersion'].split('.')) - new_test = client_context.require_version_max(*max_ver)( - new_test) - - topologies = test['topology'] - new_test = client_context.require_cluster_type(topologies)( - new_test) - - test_name = 'test_%s_%s_%s' % ( - dirname, - test_type.replace("-", "_"), - str(test['description'].replace(" ", "_"))) - - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - - -create_tests() - - -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 05e94b3d9b..8b2716a262 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -13,7 +13,9 @@ # limitations under the License. 
"""Test the mongo_client module.""" +from __future__ import annotations +import _thread as thread import contextlib import copy import datetime @@ -22,98 +24,118 @@ import signal import socket import struct +import subprocess import sys -import time import threading -import warnings +import time +from typing import Iterable, Type, no_type_check +from unittest.mock import patch sys.path[0:0] = [""] +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + MockClientTest, + SkipTest, + client_context, + client_knobs, + db_pwd, + db_user, + unittest, +) +from test.pymongo_mocks import MockClient +from test.utils import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + assertRaisesExactly, + connected, + delay, + get_pool, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, + remove_all_users, + rs_client, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + wait_until, +) + +import pymongo from bson import encode -from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry -from bson.py3compat import thread +from bson.codec_options import ( + CodecOptions, + DatetimeConversion, + TypeEncoder, + TypeRegistry, +) from bson.son import SON from bson.tz_util import utc -import pymongo -from pymongo import auth, message -from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS +from pymongo import event_loggers, message, monitoring +from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD from pymongo.cursor import Cursor, CursorType from pymongo.database import Database -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - InvalidName, - InvalidURI, - NetworkTimeout, - OperationFailure, - WriteConcernError) -from pymongo.monitoring import (ServerHeartbeatListener, - ServerHeartbeatStartedEvent) -from pymongo.mongo_client import MongoClient -from pymongo.monotonic import time as monotonic_time from pymongo.driver_info import DriverInfo -from pymongo.pool import SocketInfo, _METADATA +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.mongo_client import MongoClient +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool import _METADATA, Connection, PoolOptions from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import readable_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE +from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.topology import _ErrorContext +from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - SkipTest, - unittest, - IntegrationTest, - db_pwd, - db_user, - MockClientTest, - HAVE_IPADDRESS) -from test.pymongo_mocks import MockClient -from test.utils import (assertRaisesExactly, - connected, - delay, - FunctionCallRecorder, - get_pool, - gevent_monkey_patched, - ignore_deprecations, - 
is_greenthread_patched, - lazy_client_trial, - NTHREADS, - one, - remove_all_users, - rs_client, - rs_or_single_client, - rs_or_single_client_noauth, - server_is_master_with_slave, - single_client, - wait_until) class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" + client: MongoClient + @classmethod - @client_context.require_connection def setUpClass(cls): - cls.client = rs_or_single_client(connect=False, - serverSelectionTimeoutMS=100) + cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + + @classmethod + def tearDownClass(cls): + cls.client.close() def test_keyword_arg_defaults(self): - client = MongoClient(socketTimeoutMS=None, - connectTimeoutMS=20000, - waitQueueTimeoutMS=None, - waitQueueMultiple=None, - replicaSet=None, - read_preference=ReadPreference.PRIMARY, - ssl=False, - ssl_keyfile=None, - ssl_certfile=None, - ssl_cert_reqs=0, # ssl.CERT_NONE - ssl_ca_certs=None, - connect=False, - serverSelectionTimeoutMS=12000) + client = MongoClient( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) options = client._MongoClient__options pool_opts = options.pool_options @@ -121,12 +143,26 @@ def test_keyword_arg_defaults(self): # socket.Socket.settimeout takes a float in seconds self.assertEqual(20.0, pool_opts.connect_timeout) self.assertEqual(None, pool_opts.wait_queue_timeout) - self.assertEqual(None, pool_opts.wait_queue_multiple) - self.assertTrue(pool_opts.socket_keepalive) - self.assertEqual(None, pool_opts.ssl_context) + self.assertEqual(None, pool_opts._ssl_context) self.assertEqual(None, options.replica_set_name) self.assertEqual(ReadPreference.PRIMARY, client.read_preference) - self.assertAlmostEqual(12, client.server_selection_timeout) + self.assertAlmostEqual(12, client.options.server_selection_timeout) + + def test_connect_timeout(self): + client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + client = MongoClient( + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) def test_types(self): self.assertRaises(TypeError, MongoClient, 1) @@ -138,8 +174,12 @@ def test_types(self): self.assertRaises(ConfigurationError, MongoClient, []) def test_max_pool_size_zero(self): - with self.assertRaises(ValueError): - MongoClient(maxPoolSize=0) + MongoClient(maxPoolSize=0) + + def test_uri_detection(self): + self.assertRaises(ConfigurationError, MongoClient, "/foo") + self.assertRaises(ConfigurationError, MongoClient, "://") + self.assertRaises(ConfigurationError, MongoClient, "foo/") def test_get_db(self): def make_db(base, name): @@ -159,15 +199,14 @@ def make_db(base, name): def test_get_database(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) 
- db = self.client.get_database( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client['_does_not_exist'], Database)) + self.assertTrue(isinstance(self.client["_does_not_exist"], Database)) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -175,138 +214,160 @@ def test_getattr(self): # Message should be: # "AttributeError: MongoClient has no attribute '_does_not_exist'. To # access the _does_not_exist database, use client['_does_not_exist']". - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): - def iterate(): - [a for a in self.client] - - self.assertRaises(TypeError, iterate) + client = self.client + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): + msg = "'NoneType' object is not callable" + else: + msg = "'MongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) def test_get_default_database(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database()) # Test that default doesn't override the URI value. - self.assertEqual(Database(c, 'foo'), c.get_default_database('bar')) + self.assertEqual(Database(c, "foo"), c.get_default_database("bar")) codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - db = c.get_default_database( - None, codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database('foo')) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database("foo")) def test_get_default_database_error(self): # URI with no database. 
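# The options verified here are the ones get_database() accepts; a hedged
# usage sketch with an illustrative database name, using keyword form:
def _example_get_database(client):
    from bson.codec_options import CodecOptions
    from pymongo import ReadPreference
    from pymongo.write_concern import WriteConcern

    return client.get_database(
        "app_db",
        codec_options=CodecOptions(tz_aware=True),
        read_preference=ReadPreference.SECONDARY,
        write_concern=WriteConcern(w=2, j=True),
    )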
- c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_default_database) def test_get_default_database_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + self.assertEqual(Database(c, "foo"), c.get_default_database()) def test_get_database_default(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_get_database_default_error(self): # URI with no database. - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_database) def test_get_database_default_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?readpreferencetags=dc:east') + MongoClient("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?' - 'readpreference=primary&readpreferencetags=dc:east') + MongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): c = rs_or_single_client( - "mongodb://host", connect=False, - readpreference=ReadPreference.NEAREST.mongos_mode) + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) def test_metadata(self): metadata = copy.deepcopy(_METADATA) - metadata['application'] = {'name': 'foobar'} - client = MongoClient( - "mongodb://foo:27017/?appname=foobar&connect=false") + metadata["application"] = {"name": "foobar"} + client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - client = MongoClient('foo', 27017, appname='foobar', connect=False) + client = MongoClient("foo", 27017, appname="foobar", connect=False) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) # No error - MongoClient(appname='x' * 128) - self.assertRaises(ValueError, MongoClient, appname='x' * 129) + MongoClient(appname="x" * 128) + self.assertRaises(ValueError, MongoClient, appname="x" * 129) # Bad "driver" options. 
- self.assertRaises(TypeError, DriverInfo, 'Foo', 1, 'a') + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") + self.assertRaises(TypeError, DriverInfo) self.assertRaises(TypeError, MongoClient, driver=1) - self.assertRaises(TypeError, MongoClient, driver='abc') - self.assertRaises(TypeError, MongoClient, driver=('Foo', '1', 'a')) + self.assertRaises(TypeError, MongoClient, driver="abc") + self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. - metadata['driver']['name'] = 'PyMongo|FooDriver' - metadata['driver']['version'] = '%s|1.2.3' % ( - _METADATA['driver']['version'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', None), connect=False) + metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - metadata['platform'] = '%s|FooPlatform' % ( - _METADATA['platform'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', 'FooPlatform'), connect=False) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) def test_kwargs_codec_options(self): - class MyFloatType(object): + class MyFloatType: def __init__(self, x): self.__x = x + @property def x(self): return self.__x class MyFloatAsIntEncoder(TypeEncoder): python_type = MyFloatType + def transform_python(self, value): return int(value) # Ensure codec options are passed in correctly - document_class = SON + document_class: Type[SON] = SON type_registry = TypeRegistry([MyFloatAsIntEncoder()]) tz_aware = True - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" tzinfo = utc c = MongoClient( document_class=document_class, @@ -315,63 +376,76 @@ def transform_python(self, value): uuidrepresentation=uuid_representation_label, unicode_decode_error_handler=unicode_decode_error_handler, tzinfo=tzinfo, - connect=False + connect=False, ) self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) - self.assertEqual( - c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) def test_uri_codec_options(self): # Ensure codec options are passed in correctly - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' - uri = ("mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" - "%s&unicode_decode_error_handler=%s" % ( - client_context.host, - client_context.port, - 
uuid_representation_label, - unicode_decode_error_handler)) + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" + uri = ( + "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" + "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" + % ( + client_context.host, + client_context.port, + uuid_representation_label, + unicode_decode_error_handler, + datetime_conversion, + ) + ) c = MongoClient(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) + + # Change the passed datetime_conversion to a number and re-assert. + uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") + c = MongoClient(uri, connect=False) + self.assertEqual( - c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. - uri = ("mongodb://localhost/?ssl=true&replicaSet=name" - "&readPreference=primary") - c = MongoClient(uri, ssl=False, replicaSet="newname", - readPreference="secondaryPreferred") + uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" + c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") clopts = c._MongoClient__options opts = clopts._options - self.assertEqual(opts['ssl'], False) + self.assertEqual(opts["tls"], False) self.assertEqual(clopts.replica_set_name, "newname") - self.assertEqual( - clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import resolver - patched_resolver = FunctionCallRecorder(resolver.query) - pymongo.srv_resolver.resolver.query = patched_resolver + from pymongo.srv_resolver import _resolve + + patched_resolver = FunctionCallRecorder(_resolve) + pymongo.srv_resolver._resolve = patched_resolver + def reset_resolver(): - pymongo.srv_resolver.resolver.query = resolver.query + pymongo.srv_resolver._resolve = _resolve + self.addCleanup(reset_resolver) # Setup. @@ -385,7 +459,7 @@ def test_scenario(args, kwargs, expected_value): patched_resolver.reset() MongoClient(*args, **kwargs) for _, kw in patched_resolver.call_list(): - self.assertAlmostEqual(kw['lifetime'], expected_value) + self.assertAlmostEqual(kw["lifetime"], expected_value) # No timeout specified. test_scenario((base_uri,), {}, CONNECT_TIMEOUT) @@ -394,7 +468,7 @@ def test_scenario(args, kwargs, expected_value): test_scenario((uri_with_timeout,), {}, expected_uri_value) # Timeout only specified in keyword arguments. 
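# What this resolver test means in practice: for "mongodb+srv://" URIs the
# driver reuses connectTimeoutMS as the lifetime of its SRV/TXT lookups, so
# a single knob bounds DNS resolution as well as the TCP connect. A sketch
# with an illustrative hostname; note the SRV lookup itself runs while the
# client is constructed, even with connect=False:
def _example_srv_connect_timeout():
    from pymongo import MongoClient

    return MongoClient(
        "mongodb+srv://cluster0.example.net/?connectTimeoutMS=2000",
        connect=False,
    )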
- kwarg = {'connectTimeoutMS': connectTimeoutMS} + kwarg = {"connectTimeoutMS": connectTimeoutMS} test_scenario((base_uri,), kwarg, expected_kw_value) # Timeout specified in both kwargs and connection string. @@ -403,146 +477,187 @@ def test_scenario(args, kwargs, expected_value): def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?ssl=true', tls=False, - connect=False) + MongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = MongoClient('mongodb://localhost/?ssl=false', tls=False, - connect=False) - self.assertEqual(c._MongoClient__options._options['ssl'], False) + c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c._MongoClient__options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, tlsAllowInvalidHostnames=True) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) # Conflicting legacy tlsInsecure options should also raise an error. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, ssl_cert_reqs=True) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) + + # Conflicting kwargs should raise InvalidURI + with self.assertRaises(InvalidURI): + MongoClient(ssl=True, tls=False) + + def test_event_listeners(self): + c = MongoClient(event_listeners=[], connect=False) + self.assertEqual(c.options.event_listeners, []) + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] + c = MongoClient(event_listeners=listeners, connect=False) + self.assertEqual(c.options.event_listeners, listeners) + + def test_client_options(self): + c = MongoClient(connect=False) + self.assertIsInstance(c.options, ClientOptions) + self.assertIsInstance(c.options.pool_options, PoolOptions) + self.assertEqual(c.options.server_selection_timeout, 30) + self.assertEqual(c.options.pool_options.max_idle_time_seconds, None) + self.assertIsInstance(c.options.retry_writes, bool) + self.assertIsInstance(c.options.retry_reads, bool) class TestClient(IntegrationTest): + def test_multiple_uris(self): + with self.assertRaises(ConfigurationError): + MongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + "mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) - def test_max_idle_time_reaper(self): + def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): - # Assert reaper doesn't remove sockets when maxIdleTimeMS not set + # Assert reaper doesn't remove connections when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info: + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) - self.assertTrue(sock_info in server._pool.sockets) + self.assertEqual(1, len(server._pool.conns)) + self.assertTrue(conn in server._pool.conns) 
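# The pool knobs exercised by these reaper tests map one-to-one onto client
# options; a sketch whose values mirror the tests and assume a local server:
def _example_reaper_options():
    from pymongo import MongoClient

    return MongoClient(
        maxIdleTimeMS=500,  # connections idle longer than this get reaped
        minPoolSize=1,  # the reaper also tops the pool back up to this floor
        maxPoolSize=1,  # ...but never grows it past this cap
    )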
client.close() + def test_max_idle_time_reaper_removes_stale_minPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1) - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info: + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, two - # sockets could be created and checked into the pool. - self.assertGreaterEqual(len(server._pool.sockets), 1) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 <= len(server._pool.sockets), - "replace stale socket") + # connections could be created and checked into the pool. + self.assertGreaterEqual(len(server._pool.conns), 1) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") client.close() - # Assert reaper respects maxPoolSize when adding new sockets. - client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1, - maxPoolSize=1) - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info: + def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper respects maxPoolSize when adding new connections. + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, - # maxPoolSize=1 should prevent two sockets from being created. - self.assertEqual(1, len(server._pool.sockets)) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 == len(server._pool.sockets), - "replace stale socket") + # maxPoolSize=1 should prevent two connections from being created. + self.assertEqual(1, len(server._pool.conns)) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") client.close() + def test_max_idle_time_reaper_removes_stale(self): + with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info_one: + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn_one: pass - # Assert that the pool does not close sockets prematurely. - time.sleep(.300) - with server._pool.get_socket({}) as sock_info_two: + # Assert that the pool does not close connections prematurely. 
+ time.sleep(0.300) + with server._pool.checkout() as conn_two: pass - self.assertIs(sock_info_one, sock_info_two) + self.assertIs(conn_one, conn_two) wait_until( - lambda: 0 == len(server._pool.sockets), - "stale socket reaped and new one NOT added to the pool") + lambda: len(server._pool.conns) == 0, + "stale socket reaped and new one NOT added to the pool", + ) client.close() def test_min_pool_size(self): - with client_knobs(kill_cursor_frequency=.1): + with client_knobs(kill_cursor_frequency=0.1): client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) - self.assertEqual(0, len(server._pool.sockets)) + server = client._get_topology().select_server(readable_server_selector) + self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server(any_server_selector) - wait_until(lambda: 10 == len(server._pool.sockets), - "pool initialized with 10 sockets") + server = client._get_topology().select_server(readable_server_selector) + wait_until( + lambda: len(server._pool.conns) == 10, "pool initialized with 10 connections" + ) # Assert that if a socket is closed, a new one takes its place - with server._pool.get_socket({}) as sock_info: - sock_info.close_socket(None) - wait_until(lambda: 10 == len(server._pool.sockets), - "a closed socket gets replaced from the pool") - self.assertFalse(sock_info in server._pool.sockets) + with server._pool.checkout() as conn: + conn.close_conn(None) + wait_until( + lambda: len(server._pool.conns) == 10, + "a closed socket gets replaced from the pool", + ) + self.assertFalse(conn in server._pool.conns) def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info: + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) - time.sleep(1) # Sleep so that the socket becomes stale. + self.assertEqual(1, len(server._pool.conns)) + time.sleep(1) # Sleep so that the socket becomes stale. - with server._pool.get_socket({}) as new_sock_info: - self.assertNotEqual(sock_info, new_sock_info) - self.assertEqual(1, len(server._pool.sockets)) - self.assertFalse(sock_info in server._pool.sockets) - self.assertTrue(new_sock_info in server._pool.sockets) + with server._pool.checkout() as new_con: + self.assertNotEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) + self.assertFalse(conn in server._pool.conns) + self.assertTrue(new_con in server._pool.conns) - # Test that sockets are reused if maxIdleTimeMS is not set. + # Test that connections are reused if maxIdleTimeMS is not set. 
client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) - with server._pool.get_socket({}) as sock_info: + server = client._get_topology().select_server(readable_server_selector) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) + self.assertEqual(1, len(server._pool.conns)) time.sleep(1) - with server._pool.get_socket({}) as new_sock_info: - self.assertEqual(sock_info, new_sock_info) - self.assertEqual(1, len(server._pool.sockets)) + with server._pool.checkout() as new_con: + self.assertEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) def test_constants(self): """This test uses MongoClient explicitly to make sure that host and port are not overloaded. """ host, port = client_context.host, client_context.port - kwargs = client_context.default_client_options.copy() + kwargs: dict = client_context.default_client_options.copy() if client_context.auth_enabled: - kwargs['username'] = db_user - kwargs['password'] = db_pwd + kwargs["username"] = db_user + kwargs["password"] = db_pwd # Set bad defaults. MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - connected(MongoClient(serverSelectionTimeoutMS=10, - **kwargs)) + connected(MongoClient(serverSelectionTimeoutMS=10, **kwargs)) # Override the defaults. No error. connected(MongoClient(host, port, **kwargs)) @@ -563,18 +678,19 @@ def test_init_disconnected(self): c = rs_or_single_client(connect=False) self.assertIsInstance(c.is_mongos, bool) c = rs_or_single_client(connect=False) - self.assertIsInstance(c.max_pool_size, int) + self.assertIsInstance(c.options.pool_options.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) c = rs_or_single_client(connect=False) self.assertEqual(c.codec_options, CodecOptions()) - self.assertIsInstance(c.max_bson_size, int) c = rs_or_single_client(connect=False) self.assertFalse(c.primary) self.assertFalse(c.secondaries) c = rs_or_single_client(connect=False) - self.assertIsInstance(c.max_write_batch_size, int) - + self.assertIsInstance(c.topology_description, TopologyDescription) + self.assertEqual(c.topology_description, c._topology._description) + self.assertIsNone(c.address) # PYTHON-2981 + c.admin.command("ping") # connect if client_context.is_rs: # The primary's host and port are from the replica set config. 
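# What a connect=False client exposes before any I/O, per the assertions in
# test_init_disconnected; a sketch that assumes a locally reachable server
# for the final ping:
def _example_lazy_connect():
    from pymongo import MongoClient

    c = MongoClient(connect=False)
    snapshot = c.topology_description  # static snapshot, no I/O performed
    assert c.address is None  # PYTHON-2981: unset until an operation runs
    c.admin.command("ping")  # the first real operation actually connects
    return snapshot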
self.assertIsNotNone(c.address) @@ -582,45 +698,66 @@ def test_init_disconnected(self): self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - c = connected(rs_or_single_client()) + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) self.assertEqual(client_context.client, c) - # Explicitly test inequality self.assertFalse(client_context.client != c) + c = rs_or_single_client("invalid.com", connect=False) + self.addCleanup(c.close) + self.assertNotEqual(client_context.client, c) + self.assertTrue(client_context.client != c) + # Seeds differ: + self.assertNotEqual(MongoClient("a", connect=False), MongoClient("b", connect=False)) + # Same seeds but out of order still compares equal: + self.assertEqual( + MongoClient(["a", "b", "c"], connect=False), MongoClient(["c", "a", "b"], connect=False) + ) + + def test_hashable(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) + self.assertIn(c, {client_context.client}) + c = rs_or_single_client("invalid.com", connect=False) + self.addCleanup(c.close) + self.assertNotIn(c, {client_context.client}) + def test_host_w_port(self): with self.assertRaises(ValueError): - connected(MongoClient("%s:1234567" % (client_context.host,), - connectTimeoutMS=1, - serverSelectionTimeoutMS=10)) + connected( + MongoClient( + f"{client_context.host}:1234567", + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) def test_repr(self): # Used to test 'eval' below. 
import bson - client = MongoClient( - 'mongodb://localhost:27017,localhost:27018/?replicaSet=replset' - '&connectTimeoutMS=12345&w=1&wtimeoutms=100', - connect=False, document_class=SON) + client = MongoClient( # type: ignore[type-var] + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) - self.assertIn( - "document_class=bson.son.SON, " - "tz_aware=False, " - "connect=False, ", - the_repr) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("w=1", the_repr) @@ -628,20 +765,18 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) - client = MongoClient("localhost:27017,localhost:27018", - replicaSet='replset', - connectTimeoutMS=12345, - socketTimeoutMS=None, - w=1, - wtimeoutms=100, - connect=False) + client = MongoClient( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) - self.assertIn( - "document_class=dict, " - "tz_aware=False, " - "connect=False, ", - the_repr) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("sockettimeoutms=None", the_repr) @@ -651,98 +786,83 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) def test_getters(self): - wait_until(lambda: client_context.nodes == self.client.nodes, - "find all nodes") + wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") def test_list_databases(self): - cmd_docs = self.client.admin.command('listDatabases')['databases'] + cmd_docs = self.client.admin.command("listDatabases")["databases"] cursor = self.client.list_databases() self.assertIsInstance(cursor, CommandCursor) helper_docs = list(cursor) self.assertTrue(len(helper_docs) > 0) - self.assertEqual(helper_docs, cmd_docs) - for doc in helper_docs: - self.assertIs(type(doc), dict) + self.assertEqual(len(helper_docs), len(cmd_docs)) + # PYTHON-3529 Some fields may change between calls, just compare names. 
+ for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): + self.assertIs(type(helper_doc), dict) + self.assertEqual(helper_doc.keys(), cmd_doc.keys()) client = rs_or_single_client(document_class=SON) + self.addCleanup(client.close) for doc in client.list_databases(): self.assertIs(type(doc), dict) - if client_context.version.at_least(3, 4, 2): - self.client.pymongo_test.test.insert_one({}) - cursor = self.client.list_databases(filter={"name": "admin"}) - docs = list(cursor) - self.assertEqual(1, len(docs)) - self.assertEqual(docs[0]["name"], "admin") - - if client_context.version.at_least(3, 4, 3): - cursor = self.client.list_databases(nameOnly=True) - for doc in cursor: - self.assertEqual(["name"], list(doc)) - - def _test_list_names(self, meth): - self.client.pymongo_test.test.insert_one({"dummy": u"object"}) - self.client.pymongo_test_mike.test.insert_one({"dummy": u"object"}) + self.client.pymongo_test.test.insert_one({}) + cursor = self.client.list_databases(filter={"name": "admin"}) + docs = list(cursor) + self.assertEqual(1, len(docs)) + self.assertEqual(docs[0]["name"], "admin") + + cursor = self.client.list_databases(nameOnly=True) + for doc in cursor: + self.assertEqual(["name"], list(doc)) + + def test_list_database_names(self): + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) cmd_docs = self.client.admin.command("listDatabases")["databases"] cmd_names = [doc["name"] for doc in cmd_docs] - db_names = meth() + db_names = self.client.list_database_names() self.assertTrue("pymongo_test" in db_names) self.assertTrue("pymongo_test_mike" in db_names) self.assertEqual(db_names, cmd_names) - def test_list_database_names(self): - self._test_list_names(self.client.list_database_names) - - def test_database_names(self): - self._test_list_names(self.client.database_names) - def test_drop_database(self): self.assertRaises(TypeError, self.client.drop_database, 5) self.assertRaises(TypeError, self.client.drop_database, None) - self.client.pymongo_test.test.insert_one({"dummy": u"object"}) - self.client.pymongo_test2.test.insert_one({"dummy": u"object"}) + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test2.test.insert_one({"dummy": "object"}) dbs = self.client.list_database_names() self.assertIn("pymongo_test", dbs) self.assertIn("pymongo_test2", dbs) self.client.drop_database("pymongo_test") - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: + if client_context.is_rs: wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): - wc_client.drop_database('pymongo_test2') + wc_client.drop_database("pymongo_test2") self.client.drop_database(self.client.pymongo_test2) - - raise SkipTest("This test often fails due to SERVER-2329") - dbs = self.client.list_database_names() self.assertNotIn("pymongo_test", dbs) self.assertNotIn("pymongo_test2", dbs) def test_close(self): - coll = self.client.pymongo_test.bar - - self.client.close() - self.client.close() - - coll.count_documents({}) - - self.client.close() - self.client.close() - - coll.count_documents({}) + test_client = rs_or_single_client() + coll = test_client.pymongo_test.bar + test_client.close() + self.assertRaises(InvalidOperation, coll.count_documents, {}) def test_close_kills_cursors(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. 
raise SkipTest("Can't test with Jython") + test_client = rs_or_single_client() # Kill any cursors possibly queued up by previous tests. gc.collect() - self.client._process_periodic_tasks() + test_client._process_periodic_tasks() # Add some test data. - coll = self.client.pymongo_test.test_close_kills_cursors + coll = test_client.pymongo_test.test_close_kills_cursors docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) @@ -760,13 +880,13 @@ def test_close_kills_cursors(self): gc.collect() # Close the client and ensure the topology is closed. - self.assertTrue(self.client._topology._opened) - self.client.close() - self.assertFalse(self.client._topology._opened) - + self.assertTrue(test_client._topology._opened) + test_client.close() + self.assertFalse(test_client._topology._opened) + test_client = rs_or_single_client() # The killCursors task should not need to re-open the topology. - self.client._process_periodic_tasks() - self.assertFalse(self.client._topology._opened) + test_client._process_periodic_tasks() + self.assertTrue(test_client._topology._opened) def test_close_stops_kill_cursors_thread(self): client = rs_client() @@ -777,13 +897,28 @@ def test_close_stops_kill_cursors_thread(self): client.close() self.assertTrue(client._kill_cursors_executor._stopped) - # Reusing the closed client should restart the thread. - client.admin.command('isMaster') - self.assertFalse(client._kill_cursors_executor._stopped) + # Reusing the closed client should raise an InvalidOperation error. + self.assertRaises(InvalidOperation, client.admin.command, "ping") + # Thread is still stopped. + self.assertTrue(client._kill_cursors_executor._stopped) - # Again, closing the client should stop the thread. + def test_uri_connect_option(self): + # Ensure that topology is not opened if connect=False. + client = rs_client(connect=False) + self.assertFalse(client._topology._opened) + + # Ensure kill cursors thread has not been started. + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + + # Using the client should open topology and start the thread. + client.admin.command("ping") + self.assertTrue(client._topology._opened) + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + + # Tear down. 
client.close() - self.assertTrue(client._kill_cursors_executor._stopped) def test_close_does_not_open_servers(self): client = rs_client(connect=False) @@ -792,6 +927,19 @@ def test_close_does_not_open_servers(self): client.close() self.assertEqual(topology._servers, {}) + def test_close_closes_sockets(self): + client = rs_client() + self.addCleanup(client.close) + client.test.test.find_one() + topology = client._topology + client.close() + for server in topology._servers.values(): + self.assertFalse(server._pool.conns) + self.assertTrue(server._monitor._executor._stopped) + self.assertTrue(server._monitor._rtt_monitor._executor._stopped) + self.assertFalse(server._monitor._pool.conns) + self.assertFalse(server._monitor._rtt_monitor._pool.conns) + def test_bad_uri(self): with self.assertRaises(InvalidURI): MongoClient("http://localhost") @@ -803,45 +951,42 @@ def test_auth_from_uri(self): self.addCleanup(client_context.drop_user, "admin", "admin") self.addCleanup(remove_all_users, self.client.pymongo_test) - client_context.create_user( - "pymongo_test", "user", "pass", roles=['userAdmin', 'readWrite']) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) with self.assertRaises(OperationFailure): - connected(rs_or_single_client( - "mongodb://a:b@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) # No error. - connected(rs_or_single_client_noauth( - "mongodb://admin:pass@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): - connected(rs_or_single_client(uri)) + connected(rs_or_single_client_noauth(uri)) # No error. - connected(rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port))) + connected( + rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + ) # Auth with lazy connection. rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), - connect=False).pymongo_test.test.find_one() + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ).pymongo_test.test.find_one() # Wrong password. bad_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), - connect=False) + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) - self.assertRaises(OperationFailure, - bad_client.pymongo_test.test.find_one) + self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one) @client_context.require_auth def test_username_and_password(self): client_context.create_user("admin", "ad min", "pa/ss") self.addCleanup(client_context.drop_user, "admin", "ad min") - c = rs_or_single_client(username="ad min", password="pa/ss") + c = rs_or_single_client_noauth(username="ad min", password="pa/ss") # Username and password aren't in strings that will likely be logged. 
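The keyword form exercised in test_username_and_password ("ad min", "pa/ss") sidesteps URI escaping rules; in a connection string those characters would have to be percent-encoded. A sketch of both spellings, with connect=False so no server is required:

```python
from urllib.parse import quote_plus

from pymongo import MongoClient

user, pwd = "ad min", "pa/ss"
# Reserved characters must be percent-escaped inside a URI...
uri = "mongodb://%s:%s@localhost:27017" % (quote_plus(user), quote_plus(pwd))
uri_client = MongoClient(uri, connect=False)
# ...whereas the keyword arguments accept the raw values.
kw_client = MongoClient("localhost", username=user, password=pwd, connect=False)
```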
self.assertNotIn("ad min", repr(c)) @@ -853,66 +998,30 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - rs_or_single_client(username="ad min", password="foo").server_info() - - @client_context.require_auth - @ignore_deprecations - def test_multiple_logins(self): - client_context.create_user( - 'pymongo_test', 'user1', 'pass', roles=['readWrite']) - client_context.create_user( - 'pymongo_test', 'user2', 'pass', roles=['readWrite']) - self.addCleanup(remove_all_users, self.client.pymongo_test) - - client = rs_or_single_client_noauth( - "mongodb://user1:pass@%s:%d/pymongo_test" % ( - client_context.host, client_context.port)) - - client.pymongo_test.test.find_one() - with self.assertRaises(OperationFailure): - # Can't log in to the same database with multiple users. - client.pymongo_test.authenticate('user2', 'pass') - - client.pymongo_test.test.find_one() - client.pymongo_test.logout() - with self.assertRaises(OperationFailure): - client.pymongo_test.test.find_one() - - client.pymongo_test.authenticate('user2', 'pass') - client.pymongo_test.test.find_one() - - with self.assertRaises(OperationFailure): - client.pymongo_test.authenticate('user1', 'pass') - - client.pymongo_test.test.find_one() + rs_or_single_client_noauth(username="ad min", password="foo").server_info() @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), - connect=False) + f"mongodb://user:wrong@{client_context.host}/pymongo_test", connect=False + ) - assertRaisesExactly( - OperationFailure, lazy_client.test.collection.find_one) + assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) - @client_context.require_no_ssl + @client_context.require_no_tls def test_unix_socket(self): if not hasattr(socket, "AF_UNIX"): raise SkipTest("UNIX-sockets are not supported on this system") - mongodb_socket = '/tmp/mongodb-%d.sock' % (client_context.port,) - encoded_socket = ( - '%2Ftmp%2F' + 'mongodb-%d.sock' % (client_context.port,)) + mongodb_socket = "/tmp/mongodb-%d.sock" % (client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (client_context.port,) if not os.access(mongodb_socket, os.R_OK): raise SkipTest("Socket file is not accessible") - if client_context.auth_enabled: - uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, encoded_socket) - else: - uri = "mongodb://%s" % encoded_socket - + uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. client = rs_or_single_client(uri) + self.addCleanup(client.close) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -922,8 +1031,9 @@ def test_unix_socket(self): # Confirm it fails with a missing socket. 
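The encoded_socket string in test_unix_socket above hand-builds the percent-encoding; urllib can do the same. A sketch, assuming a mongod listening on the usual /tmp socket path:

```python
from urllib.parse import quote_plus

from pymongo import MongoClient

# A Unix domain socket path goes in the host part of the URI and must be
# percent-encoded ("/" becomes "%2F").
socket_path = "/tmp/mongodb-27017.sock"
client = MongoClient("mongodb://" + quote_plus(socket_path), connect=False)
```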
self.assertRaises( ConnectionFailure, - connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", - serverSelectionTimeoutMS=100)) + connected, + MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100), + ) def test_document_class(self): c = self.client @@ -935,6 +1045,7 @@ def test_document_class(self): self.assertFalse(isinstance(db.test.find_one(), SON)) c = rs_or_single_client(document_class=SON) + self.addCleanup(c.close) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) @@ -945,12 +1056,13 @@ def test_timeouts(self): connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, - serverSelectionTimeoutMS=10500) + serverSelectionTimeoutMS=10500, + ) self.assertEqual(10.5, get_pool(client).opts.connect_timeout) self.assertEqual(10.5, get_pool(client).opts.socket_timeout) self.assertEqual(10.5, get_pool(client).opts.max_idle_time_seconds) - self.assertEqual(10500, client.max_idle_time_ms) - self.assertEqual(10.5, client.server_selection_timeout) + self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) + self.assertEqual(10.5, client.options.server_selection_timeout) def test_socket_timeout_ms_validation(self): c = rs_or_single_client(socketTimeoutMS=10 * 1000) @@ -959,22 +1071,20 @@ def test_socket_timeout_ms_validation(self): c = connected(rs_or_single_client(socketTimeoutMS=None)) self.assertEqual(None, get_pool(c).opts.socket_timeout) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=0) + c = connected(rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, get_pool(c).opts.socket_timeout) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=-1) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=1e10) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=1e10) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS='foo') + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS="foo") def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + self.addCleanup(timeout.close) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -985,135 +1095,89 @@ def test_socket_timeout(self): def get_x(db): doc = next(db.test.find().where(where_func)) return doc["x"] + self.assertEqual(1, get_x(no_timeout.pymongo_test)) self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test) def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) - self.assertAlmostEqual(0.1, client.server_selection_timeout) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) client = MongoClient(serverSelectionTimeoutMS=0, connect=False) - self.assertAlmostEqual(0, client.server_selection_timeout) + self.assertAlmostEqual(0, client.options.server_selection_timeout) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS="foo", connect=False) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS=-1, connect=False) - self.assertRaises(ConfigurationError, MongoClient, - serverSelectionTimeoutMS=None, connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, 
MongoClient, serverSelectionTimeoutMS=None, connect=False + ) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False) - self.assertAlmostEqual(0.1, client.server_selection_timeout) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False) - self.assertAlmostEqual(0, client.server_selection_timeout) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False) - self.assertAlmostEqual(30, client.server_selection_timeout) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False) - self.assertAlmostEqual(30, client.server_selection_timeout) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): client = rs_or_single_client(waitQueueTimeoutMS=2000) self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2) - def test_waitQueueMultiple(self): - client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2) - pool = get_pool(client) - self.assertEqual(pool.opts.wait_queue_multiple, 2) - self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6) - def test_socketKeepAlive(self): - for socketKeepAlive in [True, False]: - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter("always") - client = rs_or_single_client(socketKeepAlive=socketKeepAlive) - self.assertTrue(any("The socketKeepAlive option is deprecated" - in str(k) for k in ctx)) - pool = get_pool(client) - self.assertEqual(socketKeepAlive, - pool.opts.socket_keepalive) - with pool.get_socket({}) as sock_info: - keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE) - self.assertEqual(socketKeepAlive, bool(keepalive)) + pool = get_pool(self.client) + with pool.checkout() as conn: + keepalive = conn.conn.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + self.assertTrue(keepalive) + @no_type_check def test_tz_aware(self): - self.assertRaises(ValueError, MongoClient, tz_aware='foo') + self.assertRaises(ValueError, MongoClient, tz_aware="foo") aware = rs_or_single_client(tz_aware=True) + self.addCleanup(aware.close) naive = self.client aware.pymongo_test.drop_collection("test") - now = datetime.datetime.utcnow() + now = datetime.datetime.now(tz=datetime.timezone.utc) aware.pymongo_test.test.insert_one({"x": now}) self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual( aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) + naive.pymongo_test.test.find_one()["x"], + ) @client_context.require_ipv6 def test_ipv6(self): - if client_context.ssl: + if client_context.tls: if not HAVE_IPADDRESS: raise SkipTest("Need the ipaddress module to test with SSL") if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) + auth_str = 
f"{db_user}:{db_pwd}@" else: auth_str = "" uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) if client_context.is_rs: - uri += '/?replicaSet=' + client_context.replica_set_name + uri += "/?replicaSet=" + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) - client.pymongo_test.test.insert_one({"dummy": u"object"}) - client.pymongo_test_bernie.test.insert_one({"dummy": u"object"}) + self.addCleanup(client.close) + client.pymongo_test.test.insert_one({"dummy": "object"}) + client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) - @client_context.require_no_mongos - def test_fsync_lock_unlock(self): - if server_is_master_with_slave(client_context.client): - raise SkipTest('SERVER-7714') - - self.assertFalse(self.client.is_locked) - # async flushing not supported on windows... - if sys.platform not in ('cygwin', 'win32'): - # Work around async becoming a reserved keyword in Python 3.7 - opts = {'async': True} - self.client.fsync(**opts) - self.assertFalse(self.client.is_locked) - self.client.fsync(lock=True) - self.assertTrue(self.client.is_locked) - locked = True - self.client.unlock() - for _ in range(5): - locked = self.client.is_locked - if not locked: - break - time.sleep(1) - self.assertFalse(locked) - - def test_is_locked_does_not_raise_warning(self): - client = rs_or_single_client() - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter("always") - _ = client.is_locked - self.assertFalse( - any(issubclass(w.category, DeprecationWarning) for w in ctx)) - def test_contextlib(self): client = rs_or_single_client() client.pymongo_test.drop_collection("test") @@ -1121,19 +1185,20 @@ def test_contextlib(self): # The socket used for the previous commands has been returned to the # pool - self.assertEqual(1, len(get_pool(client).sockets)) + self.assertEqual(1, len(get_pool(client).conns)) with contextlib.closing(client): self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - self.assertEqual(1, len(get_pool(client).sockets)) - self.assertEqual(0, len(get_pool(client).sockets)) - + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + client = rs_or_single_client() with client as client: self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - self.assertEqual(0, len(get_pool(client).sockets)) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() def test_interrupt_signal(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to raise an exception on a thread that's # blocked on a socket, whether that's the main thread or a worker, # without simply killing the whole thread in Jython. This suggests @@ -1150,8 +1215,8 @@ def test_interrupt_signal(self): where = delay(1.5) # Need exactly 1 document so find() will execute its $where clause once - db.drop_collection('foo') - db.foo.insert_one({'_id': 1}) + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) old_signal_handler = None try: @@ -1159,10 +1224,11 @@ def test_interrupt_signal(self): # main thread while find() is in-progress: On Windows, SIGALRM is # unavailable so we use a second thread. 
In our Evergreen setup on # Linux, the thread technique causes an error in the test at - # sock.recv(): TypeError: 'int' object is not callable + # conn.recv(): TypeError: 'int' object is not callable # We don't know what causes this, so we hack around it. - if sys.platform == 'win32': + if sys.platform == "win32": + def interrupter(): # Raises KeyboardInterrupt in the main thread time.sleep(0.25) @@ -1181,7 +1247,7 @@ def sigalarm(num, frame): raised = False try: # Will be interrupted by a KeyboardInterrupt. - next(db.foo.find({'$where': where})) + next(db.foo.find({"$where": where})) except KeyboardInterrupt: raised = True @@ -1192,10 +1258,7 @@ def sigalarm(num, frame): # Raises AssertionError due to PYTHON-294 -- Mongo's response to # the previous find() is still waiting to be read on the socket, # so the request id's don't match. - self.assertEqual( - {'_id': 1}, - next(db.foo.find()) - ) + self.assertEqual({"_id": 1}, next(db.foo.find())) finally: if old_signal_handler: signal.signal(signal.SIGALRM, old_signal_handler) @@ -1206,20 +1269,19 @@ def test_operation_failure(self): # to avoid race conditions caused by replica set failover or idle # socket reaping. client = single_client() + self.addCleanup(client.close) client.pymongo_test.test.find_one() pool = get_pool(client) - socket_count = len(pool.sockets) + socket_count = len(pool.conns) self.assertGreaterEqual(socket_count, 1) - old_sock_info = next(iter(pool.sockets)) + old_conn = next(iter(pool.conns)) client.pymongo_test.test.drop() - client.pymongo_test.test.insert_one({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - client.pymongo_test.test.insert_one, {'_id': 'foo'}) + client.pymongo_test.test.insert_one({"_id": "foo"}) + self.assertRaises(OperationFailure, client.pymongo_test.test.insert_one, {"_id": "foo"}) - self.assertEqual(socket_count, len(pool.sockets)) - new_sock_info = next(iter(pool.sockets)) - self.assertEqual(old_sock_info, new_sock_info) + self.assertEqual(socket_count, len(pool.conns)) + new_con = next(iter(pool.conns)) + self.assertEqual(old_conn, new_con) def test_lazy_connect_w0(self): # Ensure that connect-on-demand works when the first operation is @@ -1227,33 +1289,36 @@ def test_lazy_connect_w0(self): # Use a separate collection to avoid races where we're still # completing an operation on a collection while the next test begins. 
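test_lazy_connect_w0 below combines lazy connection (connect=False) with unacknowledged writes (w=0), which is why its assertions poll with wait_until instead of trusting the write result. A minimal sketch of an unacknowledged write, assuming a local mongod:

```python
from pymongo import MongoClient

client = MongoClient(w=0)  # assumes mongod on localhost:27017
result = client.test.coll.insert_one({"x": 1})
# w=0 writes are fire-and-forget: the result is unacknowledged, so the
# document may not be visible to a query immediately afterwards.
assert result.acknowledged is False
```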
- client_context.client.drop_database('test_lazy_connect_w0') - self.addCleanup( - client_context.client.drop_database, 'test_lazy_connect_w0') + client_context.client.drop_database("test_lazy_connect_w0") + self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.insert_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, - "find one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" + ) client = rs_or_single_client(connect=False, w=0) - client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}}) + self.addCleanup(client.close) + client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) wait_until( - lambda: client.test_lazy_connect_w0.test.find_one().get('x') == 1, - "update one document") + lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" + ) client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.delete_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, - "delete one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" + ) @client_context.require_no_mongos def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = rs_or_single_client(maxPoolSize=1, retryReads=False) + self.addCleanup(client.close) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. @@ -1262,16 +1327,16 @@ def test_exhaust_network_error(self): connected(client) # Cause a network error. - sock_info = one(pool.sockets) - sock_info.sock.close() + conn = one(pool.conns) + conn.conn.close() cursor = collection.find(cursor_type=CursorType.EXHAUST) with self.assertRaises(ConnectionFailure): next(cursor) - self.assertTrue(sock_info.closed) + self.assertTrue(conn.closed) # The semaphore was decremented despite the error. - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertEqual(0, pool.requests) @client_context.require_auth def test_auth_network_error(self): @@ -1279,23 +1344,15 @@ def test_auth_network_error(self): # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. - c = connected(rs_or_single_client(maxPoolSize=1, - waitQueueTimeoutMS=1, - retryReads=False)) - - # Simulate an authenticate() call on a different socket. - credentials = auth._build_credentials_tuple( - 'DEFAULT', 'admin', db_user, db_pwd, {}, None) - - c._cache_credentials('test', credentials, connect=False) + c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False)) # Cause a network error on the actual socket. pool = get_pool(c) - socket_info = one(pool.sockets) - socket_info.sock.close() + socket_info = one(pool.conns) + socket_info.conn.close() - # SocketInfo.check_auth logs in with the new credential, but gets a - # socket.error. Should be reraised as AutoReconnect. + # Connection.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. self.assertRaises(AutoReconnect, c.test.collection.find_one) # No semaphore leak, the pool is allowed to make a new socket. 
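The exhaust tests above hinge on a pool invariant: an exhaust cursor keeps its connection checked out until iteration completes or fails, so error paths must check it back in to avoid leaks. A sketch of an exhaust query, assuming a local mongod that is not a mongos (mongos lacks exhaust support, SERVER-2627):

```python
from pymongo import MongoClient
from pymongo.cursor import CursorType

client = MongoClient()
coll = client.pymongo_test.test
# The connection backing this cursor stays out of the pool until the
# cursor is fully drained or the operation errors out.
for doc in coll.find({}, cursor_type=CursorType.EXHAUST):
    pass
```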
@@ -1303,8 +1360,7 @@ def test_auth_network_error(self): @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): - client = single_client(replicaSet='anything', - serverSelectionTimeoutMS=100) + client = single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @@ -1315,16 +1371,25 @@ def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # MongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = rs_client(connect=False, - serverSelectionTimeoutMS=100) - client._run_operation_with_response( - operation=message._GetMore('pymongo_test', 'collection', - 101, 1234, client.codec_options, - ReadPreference.PRIMARY, - None, client, None, None), - unpack_res=Cursor( - client.pymongo_test.collection)._unpack_response, - address=('not-a-member', 27017)) + client = rs_client(connect=False, serverSelectionTimeoutMS=100) + client._run_operation( + operation=message._GetMore( + "pymongo_test", + "collection", + 101, + 1234, + client.codec_options, + ReadPreference.PRIMARY, + None, + client, + None, + None, + False, + None, + ), + unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, + address=("not-a-member", 27017), + ) def test_heartbeat_frequency_ms(self): class HeartbeatStartedListener(ServerHeartbeatListener): @@ -1348,48 +1413,50 @@ def init(self, *args): heartbeat_times.append(time.time()) try: - ServerHeartbeatStartedEvent.__init__ = init + ServerHeartbeatStartedEvent.__init__ = init # type: ignore listener = HeartbeatStartedListener() uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( - client_context.host, client_context.port) + client_context.host, + client_context.port, + ) client = single_client(uri, event_listeners=[listener]) - wait_until(lambda: len(listener.results) >= 2, - "record two ServerHeartbeatStartedEvents") + wait_until( + lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" + ) # Default heartbeatFrequencyMS is 10 sec. Check the interval was # closer to 0.5 sec with heartbeatFrequencyMS configured. 
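test_heartbeat_frequency_ms monkey-patches the event class to record timestamps; the public way to observe heartbeats is a ServerHeartbeatListener. A sketch, noting that 500 is the minimum legal heartbeatFrequencyMS (the next test shows 499 being rejected):

```python
from pymongo import MongoClient, monitoring

class HeartbeatLogger(monitoring.ServerHeartbeatListener):
    def started(self, event):
        print("heartbeat started:", event.connection_id)

    def succeeded(self, event):
        print("heartbeat succeeded")

    def failed(self, event):
        print("heartbeat failed:", event.reply)

client = MongoClient(
    "mongodb://localhost:27017/?heartbeatFrequencyMS=500",
    event_listeners=[HeartbeatLogger()],
)
```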
- self.assertAlmostEqual( - heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) client.close() finally: - ServerHeartbeatStartedEvent.__init__ = old_init + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore def test_small_heartbeat_frequency_ms(self): uri = "mongodb://example/?heartbeatFrequencyMS=499" with self.assertRaises(ConfigurationError) as context: MongoClient(uri) - self.assertIn('heartbeatFrequencyMS', str(context.exception)) + self.assertIn("heartbeatFrequencyMS", str(context.exception)) def test_compression(self): def compression_settings(client): pool_options = client._MongoClient__options.pool_options - return pool_options.compression_settings + return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" client = MongoClient(uri, connect=False) @@ -1404,7 +1471,7 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=foobar,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) # According to the connection string spec, unsupported values @@ -1412,12 +1479,12 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) if not _HAVE_SNAPPY: @@ -1429,11 +1496,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=snappy" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy']) + self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy', 'zlib']) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) if not _HAVE_ZSTD: uri = "mongodb://localhost:27017/?compressors=zstd" @@ -1444,11 +1511,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zstd" client = MongoClient(uri, connect=False) opts = 
compression_settings(client) - self.assertEqual(opts.compressors, ['zstd']) + self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zstd', 'zlib']) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: @@ -1460,14 +1527,14 @@ def compression_settings(client): def test_reset_during_update_pool(self): client = rs_or_single_client(minPoolSize=10) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) - pool_id = pool.pool_id + generation = pool.gen.get_overall() # Continuously reset the pool. class ResetPoolThread(threading.Thread): def __init__(self, pool): - super(ResetPoolThread, self).__init__() + super().__init__() self.running = True self.pool = pool @@ -1476,7 +1543,9 @@ def stop(self): def run(self): while self.running: - self.pool.reset() + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) + client._topology.handle_error(pool.address, ctx) time.sleep(0.001) t = ResetPoolThread(pool) @@ -1488,22 +1557,22 @@ def run(self): while True: for _ in range(10): client._topology.update_pool() - if pool_id != pool.pool_id: + if generation != pool.gen.get_overall(): break finally: t.stop() t.join() - client.admin.command('ping') + client.admin.command("ping") def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 client = rs_or_single_client( - serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, - connect=False) + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) self.addCleanup(client.close) # Create a single connection in the pool. - client.admin.command('ping') + client.admin.command("ping") # Cause new connections stall for a few seconds. pool = get_pool(client) @@ -1514,24 +1583,276 @@ def stall_connect(*args, **kwargs): return original_connect(*args, **kwargs) pool.connect = stall_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") # Wait for the background thread to start creating connections - wait_until(lambda: len(pool.sockets) > 1, 'start creating connections') + wait_until(lambda: len(pool.conns) > 1, "start creating connections") # Assert that application operations do not block. for _ in range(10): - start = monotonic_time() - client.admin.command('ping') - total = monotonic_time() - start + start = time.monotonic() + client.admin.command("ping") + total = time.monotonic() - start # Each ping command should not take more than 2 seconds self.assertLess(total, 2) + @client_context.require_replica_set + def test_direct_connection(self): + # direct_connection=True should result in Single topology. + client = rs_or_single_client(directConnection=True) + client.admin.command("ping") + self.assertEqual(len(client.nodes), 1) + self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) + client.close() + + # direct_connection=False should result in RS topology. 
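A standalone sketch of the directConnection contract being tested here; connect=False keeps the first client runnable without a server, and the multi-host check fails already in the constructor:

```python
from pymongo import MongoClient
from pymongo.errors import ConfigurationError

# directConnection=True pins the client to a single server (Single topology).
client = MongoClient("localhost", directConnection=True, connect=False)

# It is incompatible with a multi-host seed list.
try:
    MongoClient(["host1", "host2"], directConnection=True)
except ConfigurationError:
    pass
```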
+ client = rs_or_single_client(directConnection=False) + client.admin.command("ping") + self.assertGreaterEqual(len(client.nodes), 1) + self.assertIn( + client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], + ) + client.close() + + # directConnection=True, should error with multiple hosts as a list. + with self.assertRaises(ConfigurationError): + MongoClient(["host1", "host2"], directConnection=True) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") + def test_continuous_network_errors(self): + def server_description_count(): + i = 0 + for obj in gc.get_objects(): + try: + if isinstance(obj, ServerDescription): + i += 1 + except ReferenceError: + pass + return i + + gc.collect() + with client_knobs(min_heartbeat_interval=0.003): + client = MongoClient( + "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 + ) + initial_count = server_description_count() + self.addCleanup(client.close) + with self.assertRaises(ServerSelectionTimeoutError): + client.test.test.find_one() + gc.collect() + final_count = server_description_count() + # If a bug like PYTHON-2433 is reintroduced then too many + # ServerDescriptions will be kept alive and this test will fail: + # AssertionError: 19 != 46 within 15 delta (27 difference) + # On Python 3.11 we seem to get more of a delta. + self.assertAlmostEqual(initial_count, final_count, delta=20) + + @client_context.require_failCommand_fail_point + def test_network_error_message(self): + client = single_client(retryReads=False) + self.addCleanup(client.close) + client.admin.command("ping") # connect + with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + assert client.address is not None + expected = "{}:{}: ".format(*client.address) + with self.assertRaisesRegex(AutoReconnect, expected): + client.pymongo_test.test.find_one({}) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") + def test_process_periodic_tasks(self): + client = rs_or_single_client() + coll = client.db.collection + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find(batch_size=2) + cursor.next() + c_id = cursor.cursor_id + self.assertIsNotNone(c_id) + client.close() + # Add cursor to kill cursors queue + del cursor + wait_until( + lambda: client._MongoClient__kill_cursors_queue, + "waited for cursor to be added to queue", + ) + client._process_periodic_tasks() # This must not raise or print any exceptions + with self.assertRaises(InvalidOperation): + coll.insert_many([{} for _ in range(5)]) + + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + def test_service_name_from_kwargs(self): + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") + + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + def 
test_srv_max_hosts_kwarg(self): + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + self.assertEqual(len(client.topology_description.server_descriptions()), 1) + client = MongoClient( + "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 + ) + self.assertEqual(len(client.topology_description.server_descriptions()), 2) + + @unittest.skipIf(_HAVE_DNSPYTHON, "dnspython must not be installed") + def test_srv_no_dnspython_error(self): + with self.assertRaisesRegex(ConfigurationError, 'The "dnspython" module must be'): + MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + + @unittest.skipIf( + client_context.load_balancer or client_context.serverless, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. + outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + + def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if expected_env is not None: + metadata["env"] = expected_env + + if "AWS_REGION" not in env_vars: + os.environ["AWS_REGION"] = "" + with rs_or_single_client(serverSelectionTimeoutMS=10000) as client: + client.admin.command("ping") + options = client._MongoClient__options + self.assertEqual(options.pool_options.metadata, metadata) + + def test_handshake_01_aws(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + def test_handshake_02_azure(self): + self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + def test_handshake_03_gcp(self): + self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. 
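Summarizing the srvServiceName precedence exercised just above: the keyword argument wins over the URI option. A sketch mirroring the test (requires dnspython and DNS access to the driver's public test zone):

```python
from pymongo import MongoClient

client = MongoClient(
    "mongodb+srv://user:password@test22.test.build.10gen.cc"
    "/?srvServiceName=shouldbeoverriden",
    srvServiceName="customname",
    connect=False,
)
# The keyword argument takes precedence over the URI option.
assert client._topology_settings.srv_service_name == "customname"
```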
+ self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + def test_handshake_04_vercel(self): + self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + def test_handshake_05_multiple(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. + self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + def test_handshake_06_region_too_long(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + def test_handshake_07_memory_invalid_int(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big"}, + {"name": "aws.lambda"}, + ) + + def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + + def test_dict_hints(self): + self.db.t.find(hint={"x": 1}) + + def test_dict_hints_sort(self): + result = self.db.t.find() + result.sort({"x": 1}) + + self.db.t.find(sort={"x": 1}) + + def test_dict_hints_create_index(self): + self.db.t.create_index({"x": pymongo.ASCENDING}) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" def setUp(self): - super(TestExhaustCursor, self).setUp() + super().setUp() if client_context.is_mongos: raise SkipTest("mongos doesn't support exhaust, SERVER-2627") @@ -1542,20 +1863,20 @@ def test_exhaust_query_server_error(self): collection = client.pymongo_test.test pool = get_pool(client) - sock_info = one(pool.sockets) + conn = one(pool.conns) # This will cause OperationFailure in all mongo versions since # the value for $orderby must be a document. cursor = collection.find( - SON([('$query', {}), ('$orderby', True)]), - cursor_type=CursorType.EXHAUST) + SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST + ) self.assertRaises(OperationFailure, cursor.next) - self.assertFalse(sock_info.closed) + self.assertFalse(conn.closed) # The socket was checked in and the semaphore was decremented. - self.assertIn(sock_info, pool.sockets) - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertIn(conn, pool.conns) + self.assertEqual(0, pool.requests) def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked @@ -1569,7 +1890,7 @@ def test_exhaust_getmore_server_error(self): pool = get_pool(client) pool._check_interval_seconds = None # Never check. - sock_info = one(pool.sockets) + conn = one(pool.conns) cursor = collection.find(cursor_type=CursorType.EXHAUST) @@ -1579,42 +1900,41 @@ def test_exhaust_getmore_server_error(self): # Cause a server error on getmore. def receive_message(request_id): # Discard the actual server response. - SocketInfo.receive_message(sock_info, request_id) + Connection.receive_message(conn, request_id) # responseFlags bit 1 is QueryFailure. 
- msg = struct.pack('= count, - 'find %s %s event(s)' % (count, event)) + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) def check_out(self, op): """Run the 'checkOut' operation.""" - label = op['label'] - with self.pool.get_socket({}, checkout=True) as sock_info: + label = op["label"] + with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. + conn.pin_cursor() if label: - self.labels[label] = sock_info + self.labels[label] = conn else: - self.addCleanup(sock_info.close_socket, None) + self.addCleanup(conn.close_conn, None) def check_in(self, op): """Run the 'checkIn' operation.""" - label = op['connection'] - sock_info = self.labels[label] - self.pool.return_socket(sock_info) + label = op["connection"] + conn = self.labels[label] + self.pool.checkin(conn) + + def ready(self, op): + """Run the 'ready' operation.""" + self.pool.ready() def clear(self, op): """Run the 'clear' operation.""" @@ -163,8 +152,8 @@ def close(self, op): def run_operation(self, op): """Run a single operation in a test.""" - op_name = camel_to_snake(op['name']) - thread = op['thread'] + op_name = camel_to_snake(op["name"]) + thread = op["thread"] meth = getattr(self, op_name) if thread: self.targets[thread].schedule(lambda: meth(op)) @@ -179,9 +168,9 @@ def run_operations(self, ops): def check_object(self, actual, expected): """Assert that the actual object matches the expected object.""" - self.assertEqual(type(actual), OBJECT_TYPES[expected['type']]) + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) for attr, expected_val in expected.items(): - if attr == 'type': + if attr == "type": continue c2s = camel_to_snake(attr) actual_val = getattr(actual, c2s) @@ -197,42 +186,84 @@ def check_event(self, actual, expected): def actual_events(self, ignore): """Return all the non-ignored events.""" ignore = tuple(OBJECT_TYPES[name] for name in ignore) - return [event for event in self.listener.events - if not isinstance(event, ignore)] + return [event for event in self.listener.events if not isinstance(event, ignore)] def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") self.check_event(actual, expected) if len(events) > len(actual_events): - self.fail('missing events: %r' % (events[len(actual_events):],)) - elif len(events) < len(actual_events): - self.fail('extra events: %r' % (actual_events[len(events):],)) + self.fail(f"missing events: {events[len(actual_events) :]!r}") def check_error(self, actual, expected): - message = expected.pop('message') + message = expected.pop("message") self.check_object(actual, expected) self.assertIn(message, str(actual)) + def _set_fail_point(self, client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + client.admin.command(cmd) + + def set_fail_point(self, command_args): + if not client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + self._set_fail_point(self.client, command_args) + def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" - self.assertEqual(scenario_def['version'], 1) - self.assertEqual(scenario_def['style'], 'unit') + self.logs: list = [] + 
self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) self.listener = CMAPListener() - self._ops = [] - - opts = test['poolOptions'].copy() - opts['event_listeners'] = [self.listener] - client = single_client(**opts) + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point(fp) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. + interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = client_context.client._topology.description + sd = td.server_descriptions()[(client_context.host, client_context.port)] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + client._topology.open() + else: + client._get_topology() self.addCleanup(client.close) - self.pool = get_pool(client) + self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. - self.targets = dict() + self.targets: dict = {} # Map of label names to Connection objects - self.labels = dict() + self.labels: dict = {} def cleanup(): for t in self.targets.values(): @@ -240,40 +271,42 @@ def cleanup(): for t in self.targets.values(): t.join(5) for conn in self.labels.values(): - conn.close_socket(None) + conn.close_conn(None) self.addCleanup(cleanup) try: - if test['error']: + if test["error"]: with self.assertRaises(PyMongoError) as ctx: - self.run_operations(test['operations']) - self.check_error(ctx.exception, test['error']) + self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) else: - self.run_operations(test['operations']) + self.run_operations(test["operations"]) - self.check_events(test['events'], test['ignore']) + self.check_events(test["events"], test["ignore"]) except Exception: # Print the events after a test failure. 
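The failPoint plumbing above drives the integration-style CMAP tests; the underlying server command looks like this (requires a server started with enableTestCommands=1):

```python
from pymongo import MongoClient

client = MongoClient()
# Make the next "find" command close the connection server-side.
client.admin.command(
    "configureFailPoint",
    "failCommand",
    mode={"times": 1},
    data={"closeConnection": True, "failCommands": ["find"]},
)
# ... exercise the behavior under test, then always switch it off:
client.admin.command("configureFailPoint", "failCommand", mode="off")
```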
- print() - print('Failed test: %r' % (test['description'],)) - print('Operations:') + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") for op in self._ops: print(op) - print('Threads:') + print("Threads:") print(self.targets) - print('Connections:') + print("Connections:") print(self.labels) - print('Events:') + print("Events:") for event in self.listener.events: print(event) + print("Log:") + for log in self.logs: + print(log) raise POOL_OPTIONS = { - 'maxPoolSize': 50, - 'minPoolSize': 1, - 'maxIdleTimeMS': 10000, - 'waitQueueTimeoutMS': 10000 + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, } # @@ -288,11 +321,10 @@ def test_1_client_connection_pool_options(self): def test_2_all_client_pools_have_same_options(self): client = rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) - client.admin.command('isMaster') + client.admin.command("ping") # Discover at least one secondary. if client_context.has_secondaries: - client.admin.command( - 'isMaster', read_preference=ReadPreference.SECONDARY) + client.admin.command("ping", read_preference=ReadPreference.SECONDARY) pools = get_pools(client) pool_opts = pools[0].opts @@ -301,10 +333,9 @@ def test_2_all_client_pools_have_same_options(self): self.assertEqual(pool.opts, pool_opts) def test_3_uri_connection_pool_options(self): - opts = '&'.join(['%s=%s' % (k, v) - for k, v in self.POOL_OPTIONS.items()]) - uri = 'mongodb://%s/?%s' % (client_context.pair, opts) - client = rs_or_single_client(uri, **self.credentials) + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{client_context.pair}/?{opts}" + client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) @@ -316,18 +347,16 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(PoolCreatedEvent), 1) # Creates a new connection. - client.admin.command('isMaster') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 1) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) # Uses the existing connection. - client.admin.command('isMaster') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 2) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) @@ -342,58 +371,44 @@ def test_5_check_out_fails_connection_error(self): pool = get_pool(client) def mock_connect(*args, **kwargs): - raise ConnectionFailure('connect failed') + raise ConnectionFailure("connect failed") + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") # Attempt to create a new connection. 
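test_4_subscribe_to_events above counts CMAP events through a test listener; an application-level listener subclasses monitoring.ConnectionPoolListener. A condensed sketch with the unneeded callbacks left as no-ops:

```python
from pymongo import MongoClient, monitoring

class PoolLogger(monitoring.ConnectionPoolListener):
    def pool_created(self, event):
        print("pool created for", event.address)

    def connection_checked_out(self, event):
        print("checked out connection", event.connection_id)

    def connection_checked_in(self, event):
        print("checked in connection", event.connection_id)

    # The remaining CMAP callbacks are deliberately no-ops here.
    def pool_ready(self, event):
        pass

    def pool_cleared(self, event):
        pass

    def pool_closed(self, event):
        pass

    def connection_created(self, event):
        pass

    def connection_ready(self, event):
        pass

    def connection_closed(self, event):
        pass

    def connection_check_out_started(self, event):
        pass

    def connection_check_out_failed(self, event):
        pass

client = MongoClient(event_listeners=[PoolLogger()])
client.admin.command("ping")  # triggers checkout/checkin events
```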
- with self.assertRaisesRegex(ConnectionFailure, 'connect failed'): - client.admin.command('isMaster') + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], - ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[2], - ConnectionCheckOutFailedEvent) - self.assertIsInstance(listener.events[3], PoolClearedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) - failed_event = listener.events[2] - self.assertEqual( - failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) def test_5_check_out_fails_auth_error(self): listener = CMAPListener() - client = single_client(event_listeners=[listener]) + client = single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) self.addCleanup(client.close) - pool = get_pool(client) - connect = pool.connect - - def mock_check_auth(self, *args, **kwargs): - self.close_socket(ConnectionClosedReason.ERROR) - raise ConnectionFailure('auth failed') - - def mock_connect(*args, **kwargs): - sock_info = connect(*args, **kwargs) - sock_info.check_auth = functools.partial(mock_check_auth, sock_info) - return sock_info - pool.connect = mock_connect # Attempt to create a new connection. - with self.assertRaisesRegex(ConnectionFailure, 'auth failed'): - client.admin.command('isMaster') + with self.assertRaisesRegex(OperationFailure, "failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], - ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[2], ConnectionCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) # Error happens here. 
- self.assertIsInstance(listener.events[3], ConnectionClosedEvent) - self.assertIsInstance(listener.events[4], - ConnectionCheckOutFailedEvent) - self.assertIsInstance(listener.events[5], PoolClearedEvent) - - failed_event = listener.events[4] - self.assertEqual( - failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) # # Extra non-spec tests @@ -404,20 +419,35 @@ def assertRepr(self, obj): self.assertEqual(repr(new_obj), repr(obj)) def test_events_repr(self): - host = ('localhost', 27017) + host = ("localhost", 27017) self.assertRepr(ConnectionCheckedInEvent(host, 1)) self.assertRepr(ConnectionCheckedOutEvent(host, 1)) - self.assertRepr(ConnectionCheckOutFailedEvent( - host, ConnectionCheckOutFailedReason.POOL_CLOSED)) - self.assertRepr(ConnectionClosedEvent( - host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr( + ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) self.assertRepr(ConnectionCreatedEvent(host, 1)) self.assertRepr(ConnectionReadyEvent(host, 1)) self.assertRepr(ConnectionCheckOutStartedEvent(host)) self.assertRepr(PoolCreatedEvent(host, {})) self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) self.assertRepr(PoolClosedEvent(host)) + def test_close_leaves_pool_unpaused(self): + # Needed until we implement PYTHON-2463. This test is related to + # test_threads.TestThreads.test_client_disconnect + listener = CMAPListener() + client = single_client(event_listeners=[listener]) + client.admin.command("ping") + pool = get_pool(client) + client.close() + self.assertEqual(1, listener.event_count(PoolClearedEvent)) + self.assertEqual(PoolState.READY, pool.state) + # Checking out a connection should succeed + with pool.checkout(): + pass + def create_test(scenario_def, test, name): def run_scenario(self): @@ -426,8 +456,7 @@ def run_scenario(self): return run_scenario -class CMAPTestCreator(TestCreator): - +class CMAPSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. @@ -437,7 +466,7 @@ def tests(self, scenario_def): return [scenario_def] -test_creator = CMAPTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) +test_creator = CMAPSpecTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) test_creator.create_tests() diff --git a/test/test_code.py b/test/test_code.py index a2e257adf8..c564e3e04e 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -15,30 +14,31 @@ # limitations under the License. 
"""Tests for the Code wrapper.""" +from __future__ import annotations import sys + sys.path[0:0] = [""] -from bson.code import Code from test import unittest +from bson.code import Code + class TestCode(unittest.TestCase): def test_types(self): self.assertRaises(TypeError, Code, 5) self.assertRaises(TypeError, Code, None) self.assertRaises(TypeError, Code, "aoeu", 5) - self.assertRaises(TypeError, Code, u"aoeu", 5) self.assertTrue(Code("aoeu")) - self.assertTrue(Code(u"aoeu")) self.assertTrue(Code("aoeu", {})) - self.assertTrue(Code(u"aoeu", {})) def test_read_only(self): c = Code("blah") def set_c(): - c.scope = 5 + c.scope = 5 # type: ignore + self.assertRaises(AttributeError, set_c) def test_code(self): @@ -49,15 +49,15 @@ def test_code(self): self.assertTrue(isinstance(a_code, Code)) self.assertFalse(isinstance(a_string, Code)) self.assertIsNone(a_code.scope) - with_scope = Code('hello world', {'my_var': 5}) - self.assertEqual({'my_var': 5}, with_scope.scope) - empty_scope = Code('hello world', {}) + with_scope = Code("hello world", {"my_var": 5}) + self.assertEqual({"my_var": 5}, with_scope.scope) + empty_scope = Code("hello world", {}) self.assertEqual({}, empty_scope.scope) - another_scope = Code(with_scope, {'new_var': 42}) + another_scope = Code(with_scope, {"new_var": 42}) self.assertEqual(str(with_scope), str(another_scope)) - self.assertEqual({'new_var': 42, 'my_var': 5}, another_scope.scope) + self.assertEqual({"new_var": 42, "my_var": 5}, another_scope.scope) # No error. - Code(u'héllø world¡') + Code("héllø world¡") def test_repr(self): c = Code("hello world", {}) @@ -67,7 +67,7 @@ def test_repr(self): c = Code("hello world", {"blah": 3}) self.assertEqual(repr(c), "Code('hello world', {'blah': 3})") c = Code("\x08\xFF") - self.assertEqual(repr(c), "Code(%s, None)" % (repr("\x08\xFF"),)) + self.assertEqual(repr(c), "Code({}, None)".format(repr("\x08\xFF"))) def test_equality(self): b = Code("hello") @@ -100,8 +100,7 @@ def test_scope_preserved(self): def test_scope_kwargs(self): self.assertEqual({"a": 1}, Code("", a=1).scope) self.assertEqual({"a": 1}, Code("", {"a": 2}, a=1).scope) - self.assertEqual({"a": 1, "b": 2, "c": 3}, - Code("", {"b": 2}, a=1, c=3).scope) + self.assertEqual({"a": 1, "b": 2, "c": 3}, Code("", {"b": 2}, a=1, c=3).scope) if __name__ == "__main__": diff --git a/test/test_collation.py b/test/test_collation.py index 7cb4d8b5c1..bedf0a2eaa 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -13,43 +13,52 @@ # limitations under the License. 
"""Test the collation module.""" +from __future__ import annotations import functools import warnings +from test import IntegrationTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client +from typing import Any from pymongo.collation import ( Collation, - CollationCaseFirst, CollationStrength, CollationAlternate, - CollationMaxVariable) + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) from pymongo.errors import ConfigurationError -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, ReplaceOne, - UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import unittest, client_context -from test.utils import EventListener, ignore_deprecations, rs_or_single_client class TestCollationObject(unittest.TestCase): - def test_constructor(self): self.assertRaises(TypeError, Collation, locale=42) # Fill in a locale to test the other options. - _Collation = functools.partial(Collation, 'en_US') + _Collation = functools.partial(Collation, "en_US") # No error. _Collation(caseFirst=CollationCaseFirst.UPPER) - self.assertRaises(TypeError, _Collation, caseLevel='true') - self.assertRaises(ValueError, _Collation, strength='six') - self.assertRaises(TypeError, _Collation, - numericOrdering='true') + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") self.assertRaises(TypeError, _Collation, alternate=5) self.assertRaises(TypeError, _Collation, maxVariable=2) - self.assertRaises(TypeError, _Collation, normalization='false') - self.assertRaises(TypeError, _Collation, backwards='true') + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") # No errors. 
- Collation('en_US', future_option='bar', another_option=42) + Collation("en_US", future_option="bar", another_option=42) collation = Collation( - 'en_US', + "en_US", caseLevel=True, caseFirst=CollationCaseFirst.UPPER, strength=CollationStrength.QUATERNARY, @@ -57,46 +66,42 @@ def test_constructor(self): alternate=CollationAlternate.SHIFTED, maxVariable=CollationMaxVariable.SPACE, normalization=True, - backwards=True) - - self.assertEqual({ - 'locale': 'en_US', - 'caseLevel': True, - 'caseFirst': 'upper', - 'strength': 4, - 'numericOrdering': True, - 'alternate': 'shifted', - 'maxVariable': 'space', - 'normalization': True, - 'backwards': True - }, collation.document) - - self.assertEqual({ - 'locale': 'en_US', - 'backwards': True - }, Collation('en_US', backwards=True).document) - - -def raisesConfigurationErrorForOldMongoDB(func): - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - if client_context.version.at_least(3, 3, 9): - return func(self, *args, **kwargs) - else: - with self.assertRaises(ConfigurationError): - return func(self, *args, **kwargs) - return wrapper - - -class TestCollation(unittest.TestCase): + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) + + +class TestCollation(IntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation @classmethod @client_context.require_connection def setUpClass(cls): + super().setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - cls.collation = Collation('en_US') + cls.collation = Collation("en_US") cls.warn_context = warnings.catch_warnings() cls.warn_context.__enter__() warnings.simplefilter("ignore", DeprecationWarning) @@ -105,309 +110,178 @@ def setUpClass(cls): def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None + cls.client.close() + super().tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() + super().tearDown() def last_command_started(self): - return self.listener.results['started'][-1].command + return self.listener.started_events[-1].command def assertCollationInLastCommand(self): - self.assertEqual( - self.collation.document, - self.last_command_started()['collation']) + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) - @raisesConfigurationErrorForOldMongoDB def test_create_collection(self): self.db.test.drop() - self.db.create_collection('test', collation=self.collation) + self.db.create_collection("test", collation=self.collation) self.assertCollationInLastCommand() # Test passing collation as a dict as well. 
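+ # A plain dict such as {"locale": "en_US"} is accepted here as well; + # Collation("en_US").document is exactly that dict form.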
self.db.test.drop() - self.listener.results.clear() - self.db.create_collection('test', collation=self.collation.document) + self.listener.reset() + self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() def test_index_model(self): - model = IndexModel([('a', 1), ('b', -1)], collation=self.collation) - self.assertEqual(self.collation.document, model.document['collation']) + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) - @raisesConfigurationErrorForOldMongoDB def test_create_index(self): - self.db.test.create_index('foo', collation=self.collation) - ci_cmd = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - ci_cmd['indexes'][0]['collation']) + self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.started_events[0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) - @raisesConfigurationErrorForOldMongoDB - def test_ensure_index(self): - self.db.test.ensure_index('foo', collation=self.collation) - ci_cmd = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - ci_cmd['indexes'][0]['collation']) - - @raisesConfigurationErrorForOldMongoDB def test_aggregate(self): - self.db.test.aggregate([{'$group': {'_id': 42}}], - collation=self.collation) - self.assertCollationInLastCommand() - - @raisesConfigurationErrorForOldMongoDB - @ignore_deprecations - def test_count(self): - self.db.test.count(collation=self.collation) - self.assertCollationInLastCommand() - - self.listener.results.clear() - self.db.test.find(collation=self.collation).count() + self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_count_documents(self): self.db.test.count_documents({}, collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_distinct(self): - self.db.test.distinct('foo', collation=self.collation) + self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() - self.db.test.find(collation=self.collation).distinct('foo') + self.listener.reset() + self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_find_command(self): - self.db.test.insert_one({'is this thing on?': True}) - self.listener.results.clear() + self.db.test.insert_one({"is this thing on?": True}) + self.listener.reset() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_explain_command(self): - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. 
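+ # explain() wraps the find in an explain command, so the collation is + # expected under command["explain"]["collation"] rather than at the top + # level, matching the assertion below.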
self.assertEqual( - self.collation.document, - self.last_command_started()['explain']['collation']) - - @raisesConfigurationErrorForOldMongoDB - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - self.db.test.group('foo', {'foo': {'$gt': 42}}, {}, - 'function(a, b) { return a; }', - collation=self.collation) - self.assertCollationInLastCommand() - - @raisesConfigurationErrorForOldMongoDB - def test_map_reduce(self): - self.db.test.map_reduce('function() {}', 'function() {}', 'output', - collation=self.collation) - self.assertCollationInLastCommand() + self.collation.document, self.last_command_started()["explain"]["collation"] + ) - @raisesConfigurationErrorForOldMongoDB def test_delete(self): - self.db.test.delete_one({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) - self.listener.results.clear() - self.db.test.delete_many({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) - - self.listener.results.clear() - self.db.test.remove({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.listener.reset() + self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) - @raisesConfigurationErrorForOldMongoDB def test_update(self): - self.db.test.update({'foo': 42}, {'$set': {'foo': 'bar'}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) - - self.listener.results.clear() - self.db.test.save({'_id': 12345}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() - self.db.test.replace_one({'foo': 42}, {'foo': 43}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.listener.reset() + self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() - self.db.test.update_one({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) - - self.listener.results.clear() - self.db.test.update_many({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - 
self.collation.document, - command['updates'][0]['collation']) + self.listener.reset() + self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - @raisesConfigurationErrorForOldMongoDB def test_find_and(self): - self.db.test.find_and_modify({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) + self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() - self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation) + self.listener.reset() + self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) self.assertCollationInLastCommand() - self.listener.results.clear() - self.db.test.find_one_and_update({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) + self.listener.reset() + self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() - self.db.test.find_one_and_replace({'foo': 42}, {'foo': 43}, - collation=self.collation) - self.assertCollationInLastCommand() - - @raisesConfigurationErrorForOldMongoDB def test_bulk_write(self): - self.db.test.collection.bulk_write([ - DeleteOne({'noCollation': 42}), - DeleteMany({'noCollation': 42}), - DeleteOne({'foo': 42}, collation=self.collation), - DeleteMany({'foo': 42}, collation=self.collation), - ReplaceOne({'noCollation': 24}, {'bar': 42}), - UpdateOne({'noCollation': 84}, {'$set': {'bar': 10}}, upsert=True), - UpdateMany({'noCollation': 45}, {'$set': {'bar': 42}}), - ReplaceOne({'foo': 24}, {'foo': 42}, collation=self.collation), - UpdateOne({'foo': 84}, {'$set': {'foo': 10}}, upsert=True, - collation=self.collation), - UpdateMany({'foo': 45}, {'$set': {'foo': 42}}, - collation=self.collation) - ]) - - delete_cmd = self.listener.results['started'][0].command - update_cmd = self.listener.results['started'][1].command + self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command def check_ops(ops): for op in ops: - if 'noCollation' in op['q']: - self.assertNotIn('collation', op) + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) else: - self.assertEqual(self.collation.document, - op['collation']) - - check_ops(delete_cmd['deletes']) - check_ops(update_cmd['updates']) - - @raisesConfigurationErrorForOldMongoDB - def test_bulk(self): - bulk = self.db.test.initialize_ordered_bulk_op() - bulk.find({'noCollation': 42}).remove_one() - bulk.find({'noCollation': 42}).remove() - bulk.find({'foo': 42}, collation=self.collation).remove_one() - bulk.find({'foo': 42}, collation=self.collation).remove() - bulk.find({'noCollation': 24}).replace_one({'bar': 42}) - 
bulk.find({'noCollation': 84}).upsert().update_one( - {'$set': {'foo': 10}}) - bulk.find({'noCollation': 45}).update({'$set': {'bar': 42}}) - bulk.find({'foo': 24}, collation=self.collation).replace_one( - {'foo': 42}) - bulk.find({'foo': 84}, collation=self.collation).upsert().update_one( - {'$set': {'foo': 10}}) - bulk.find({'foo': 45}, collation=self.collation).update({ - '$set': {'foo': 42}}) - bulk.execute() - - delete_cmd = self.listener.results['started'][0].command - update_cmd = self.listener.results['started'][1].command + self.assertEqual(self.collation.document, op["collation"]) - def check_ops(ops): - for op in ops: - if 'noCollation' in op['q']: - self.assertNotIn('collation', op) - else: - self.assertEqual(self.collation.document, - op['collation']) - - check_ops(delete_cmd['deletes']) - check_ops(update_cmd['updates']) - - @client_context.require_version_max(3, 3, 8) - def test_mixed_bulk_collation(self): - bulk = self.db.test.initialize_unordered_bulk_op() - bulk.find({'foo': 42}).upsert().update_one( - {'$set': {'bar': 10}}) - bulk.find({'foo': 43}, collation=self.collation).remove_one() - with self.assertRaises(ConfigurationError): - bulk.execute() - self.assertIsNone(self.db.test.find_one({'foo': 42})) + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) - @raisesConfigurationErrorForOldMongoDB def test_indexes_same_keys_different_collations(self): self.db.test.drop() - usa_collation = Collation('en_US') - ja_collation = Collation('ja') - self.db.test.create_indexes([ - IndexModel('fieldname', collation=usa_collation), - IndexModel('fieldname', name='japanese_version', - collation=ja_collation), - IndexModel('fieldname', name='simple') - ]) + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) indexes = self.db.test.index_information() - self.assertEqual(usa_collation.document['locale'], - indexes['fieldname_1']['collation']['locale']) - self.assertEqual(ja_collation.document['locale'], - indexes['japanese_version']['collation']['locale']) - self.assertNotIn('collation', indexes['simple']) - self.db.test.drop_index('fieldname_1') + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + self.db.test.drop_index("fieldname_1") indexes = self.db.test.index_information() - self.assertIn('japanese_version', indexes) - self.assertIn('simple', indexes) - self.assertNotIn('fieldname', indexes) + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) def test_unacknowledged_write(self): unacknowledged = WriteConcern(w=0) - collection = self.db.get_collection( - 'test', write_concern=unacknowledged) + collection = self.db.get_collection("test", write_concern=unacknowledged) with self.assertRaises(ConfigurationError): collection.update_one( - {'hello': 'world'}, {'$set': {'hello': 'moon'}}, - collation=self.collation) - bulk = collection.initialize_ordered_bulk_op() - bulk.find({'hello': 'world'}, collation=self.collation).update_one( - {'$set': {'hello': 'moon'}}) - with self.assertRaises(ConfigurationError): - bulk.execute() - update_one = UpdateOne({'hello': 
'world'}, {'$set': {'hello': 'moon'}}, - collation=self.collation) + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) with self.assertRaises(ConfigurationError): collection.bulk_write([update_one]) - @raisesConfigurationErrorForOldMongoDB def test_cursor_collation(self): - self.db.test.insert_one({'hello': 'world'}) + self.db.test.insert_one({"hello": "world"}) next(self.db.test.find().collation(self.collation)) self.assertCollationInLastCommand() diff --git a/test/test_collection.py b/test/test_collection.py index acd2c2c1a1..4947192453 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,61 +13,71 @@ # limitations under the License. """Test the collection module.""" +from __future__ import annotations import contextlib import re import sys -import threading - from codecs import utf_8_decode from collections import defaultdict +from typing import Iterable, no_type_check + +from pymongo.database import Database sys.path[0:0] = [""] +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + get_pool, + is_mongos, + rs_or_single_client, + single_client, + wait_until, +) + from bson import encode -from bson.raw_bson import RawBSONDocument -from bson.regex import Regex -from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from bson.py3compat import itervalues +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex from bson.son import SON -from pymongo import (ASCENDING, DESCENDING, GEO2D, - GEOHAYSTACK, GEOSPHERE, HASHED, TEXT) +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT from pymongo.bulk import BulkWriteError from pymongo.collection import Collection, ReturnDocument from pymongo.command_cursor import CommandCursor from pymongo.cursor import CursorType -from pymongo.errors import (ConfigurationError, - DocumentTooLarge, - DuplicateKeyError, - ExecutionTimeout, - InvalidDocument, - InvalidName, - InvalidOperation, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command from pymongo.mongo_client import MongoClient from pymongo.operations import * from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference -from pymongo.results import (InsertOneResult, - InsertManyResult, - UpdateResult, - DeleteResult) +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) from pymongo.write_concern import WriteConcern -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import (get_pool, ignore_deprecations, is_mongos, - rs_or_single_client, single_client, - wait_until, EventListener, - IMPOSSIBLE_WRITE_CONCERN) class TestCollectionNoConnect(unittest.TestCase): - """Test Collection features on a client that does not connect. 
- """ + """Test Collection features on a client that does not connect.""" + + db: Database @classmethod def setUpClass(cls): @@ -95,7 +103,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll['_does_not_exist'], Collection)) + self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -104,8 +112,7 @@ def test_getattr(self): # "AttributeError: Collection has no attribute '_does_not_exist'. To # access the test._does_not_exist collection, use # database['test._does_not_exist']." - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) coll2 = coll.with_options(write_concern=WriteConcern(w=0)) self.assertEqual(coll2.write_concern, WriteConcern(w=0)) @@ -116,15 +123,35 @@ def test_getattr(self): self.assertEqual(coll2.write_concern, coll4.write_concern) def test_iteration(self): - self.assertRaises(TypeError, next, self.db) + coll = self.db.coll + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): + msg = "'NoneType' object is not callable" + else: + msg = "'Collection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) class TestCollection(IntegrationTest): + w: int @classmethod def setUpClass(cls): - super(TestCollection, cls).setUpClass() - cls.w = client_context.w + super().setUpClass() + cls.w = client_context.w # type: ignore @classmethod def tearDownClass(cls): @@ -138,12 +165,12 @@ def tearDown(self): @contextlib.contextmanager def write_concern_collection(self): - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: + if client_context.is_rs: with self.assertRaises(WriteConcernError): # Unsatisfiable write concern. yield Collection( - self.db, 'test', - write_concern=WriteConcern(w=len(client_context.nodes) + 1)) + self.db, "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) else: yield self.db.test @@ -154,39 +181,41 @@ def test_equality(self): self.assertEqual(self.db.test.mike, self.db["test.mike"]) self.assertEqual(self.db.test["mike"], self.db["test.mike"]) - @client_context.require_version_min(3, 3, 9) + def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + def test_create(self): # No Exception. 
db = client_context.client.pymongo_test db.create_test_no_wc.drop() wait_until( - lambda: 'create_test_no_wc' not in db.list_collection_names(), - 'drop create_test_no_wc collection') - Collection(db, name='create_test_no_wc', create=True) + lambda: "create_test_no_wc" not in db.list_collection_names(), + "drop create_test_no_wc collection", + ) + Collection(db, name="create_test_no_wc", create=True) wait_until( - lambda: 'create_test_no_wc' in db.list_collection_names(), - 'create create_test_no_wc collection') + lambda: "create_test_no_wc" in db.list_collection_names(), + "create create_test_no_wc collection", + ) # SERVER-33317 - if (not client_context.is_mongos or not - client_context.version.at_least(3, 7, 0)): + if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): with self.assertRaises(OperationFailure): Collection( - db, name='create-test-wc', - write_concern=IMPOSSIBLE_WRITE_CONCERN, - create=True) + db, name="create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN, create=True + ) def test_drop_nonexistent_collection(self): - self.db.drop_collection('test') - self.assertFalse('test' in self.db.list_collection_names()) + self.db.drop_collection("test") + self.assertFalse("test" in self.db.list_collection_names()) # No exception - self.db.drop_collection('test') + self.db.drop_collection("test") def test_create_indexes(self): db = self.db - self.assertRaises(TypeError, db.test.create_indexes, 'foo') - self.assertRaises(TypeError, db.test.create_indexes, ['foo']) + self.assertRaises(TypeError, db.test.create_indexes, "foo") + self.assertRaises(TypeError, db.test.create_indexes, ["foo"]) self.assertRaises(TypeError, IndexModel, 5) self.assertRaises(ValueError, IndexModel, []) @@ -195,8 +224,7 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 1) db.test.create_indexes([IndexModel("hello")]) - db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)])]) + db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) # Tuple instead of list. 
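+ # IndexModel accepts any iterable of (key, direction) pairs, not just a + # list, so a tuple of pairs builds the same key document.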
db.test.create_indexes([IndexModel((("world", ASCENDING),))]) @@ -204,9 +232,9 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)], - name="hello_world")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) self.assertEqual(names, ["hello_world"]) db.test.drop_indexes() @@ -216,29 +244,39 @@ def test_create_indexes(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)]), - IndexModel("hello")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) info = db.test.index_information() for name in names: self.assertTrue(name in info) db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, - db.test.create_indexes, - [IndexModel('a', unique=True)]) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_indexes, [IndexModel("a", unique=True)]) with self.write_concern_collection() as coll: - coll.create_indexes([IndexModel('hello')]) + coll.create_indexes([IndexModel("hello")]) + + @client_context.require_version_max(4, 3, -1) + def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + @client_context.require_no_standalone + @client_context.require_version_min(4, 4, -1) + def test_create_indexes_commitQuorum(self): + self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") def test_create_index(self): db = self.db self.assertRaises(TypeError, db.test.create_index, 5) - self.assertRaises(TypeError, db.test.create_index, {"hello": 1}) self.assertRaises(ValueError, db.test.create_index, []) db.test.drop_indexes() @@ -254,8 +292,7 @@ def test_create_index(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - ix = db.test.create_index([("hello", DESCENDING), - ("world", ASCENDING)], name="hello_world") + ix = db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") self.assertEqual(ix, "hello_world") db.test.drop_indexes() @@ -268,14 +305,21 @@ def test_create_index(self): db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, db.test.create_index, 'a', unique=True) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_index, "a", unique=True) with self.write_concern_collection() as coll: - coll.create_index([('hello', DESCENDING)]) + coll.create_index([("hello", DESCENDING)]) + + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # 
type:ignore[arg-type] def test_drop_index(self): db = self.db @@ -304,72 +348,22 @@ def test_drop_index(self): self.assertTrue("hello_1" in db.test.index_information()) with self.write_concern_collection() as coll: - coll.drop_index('hello_1') + coll.drop_index("hello_1") @client_context.require_no_mongos @client_context.require_test_commands def test_index_management_max_time_ms(self): - if (client_context.version[:2] == (3, 4) and - client_context.version[2] < 4): - raise unittest.SkipTest("SERVER-27711") coll = self.db.test - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: + self.assertRaises(ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) self.assertRaises( - ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, - coll.create_indexes, - [IndexModel("foo")], - maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.reindex, maxTimeMS=1) + ExecutionTimeout, coll.create_indexes, [IndexModel("foo")], maxTimeMS=1 + ) + self.assertRaises(ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) + self.assertRaises(ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") - - def test_reindex(self): - if not client_context.supports_reindex: - raise unittest.SkipTest( - "reindex is no longer supported by mongos 4.1+") - db = self.db - db.drop_collection("test") - db.test.insert_one({"foo": "bar", "who": "what", "when": "how"}) - db.test.create_index("foo") - db.test.create_index("who") - db.test.create_index("when") - info = db.test.index_information() - - def check_result(result): - self.assertEqual(4, result['nIndexes']) - indexes = result['indexes'] - names = [idx['name'] for idx in indexes] - for name in names: - self.assertTrue(name in info) - for key in info: - self.assertTrue(key in names) - - reindexed = db.test.reindex() - if 'raw' in reindexed: - # mongos - for result in itervalues(reindexed['raw']): - check_result(result) - else: - check_result(reindexed) - - coll = Collection( - self.db, - 'test', - write_concern=WriteConcern(w=100)) - # No error since writeConcern is not sent. 
- coll.reindex() + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_list_indexes(self): db = self.db @@ -377,7 +371,7 @@ def test_list_indexes(self): db.test.insert_one({}) # create collection def map_indexes(indexes): - return dict([(index["name"], index) for index in indexes]) + return {index["name"]: index for index in indexes} indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 1) @@ -386,16 +380,15 @@ def map_indexes(indexes): db.test.create_index("hello") indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 2) - self.assertEqual(map_indexes(indexes)["hello_1"]["key"], - SON([("hello", ASCENDING)])) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 3) index_map = map_indexes(indexes) - self.assertEqual(index_map["hello_-1_world_1"]["key"], - SON([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) # List indexes on a collection that does not exist. @@ -415,60 +408,53 @@ def test_index_info(self): db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 3) - self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)], - db.test.index_information()["hello_-1_world_1"]["key"] - ) self.assertEqual( - True, db.test.index_information()["hello_-1_world_1"]["unique"]) + [("hello", DESCENDING), ("world", ASCENDING)], + db.test.index_information()["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, db.test.index_information()["hello_-1_world_1"]["unique"]) def test_index_geo2d(self): db = self.db db.test.drop_indexes() - self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)])) - index_info = db.test.index_information()['loc_2d'] - self.assertEqual([('loc', '2d')], index_info['key']) + self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)])) + index_info = db.test.index_information()["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) + # geoSearch was deprecated in 4.4 and removed in 5.0 + @client_context.require_version_max(4, 5) @client_context.require_no_mongos def test_index_haystack(self): db = self.db db.test.drop() - _id = db.test.insert_one({ - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }).inserted_id - db.test.insert_one({ - "pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant" - }) - db.test.insert_one({ - "pos": {"long": 59.1, "lat": 87.2}, "type": "office" - }) - db.test.create_index( - [("pos", GEOHAYSTACK), ("type", ASCENDING)], - bucketSize=1 - ) - - results = db.command(SON([ - ("geoSearch", "test"), 
- ("near", [33, 33]), - ("maxDistance", 6), - ("search", {"type": "restaurant"}), - ("limit", 30), - ]))['results'] + _id = db.test.insert_one( + {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + ).inserted_id + db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + )["results"] self.assertEqual(2, len(results)) - self.assertEqual({ - "_id": _id, - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }, results[0]) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) @client_context.require_no_mongos def test_index_text(self): @@ -478,38 +464,33 @@ def test_index_text(self): index_info = db.test.index_information()["t_text"] self.assertTrue("weights" in index_info) - db.test.insert_many([ - {'t': 'spam eggs and spam'}, - {'t': 'spam'}, - {'t': 'egg sausage and bacon'}]) + db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) # MongoDB 2.6 text search. Create 'score' field in projection. - cursor = db.test.find( - {'$text': {'$search': 'spam'}}, - {'score': {'$meta': 'textScore'}}) + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) + cursor.sort([("score", {"$meta": "textScore"})]) results = list(cursor) - self.assertTrue(results[0]['score'] >= results[1]['score']) + self.assertTrue(results[0]["score"] >= results[1]["score"]) db.test.drop_indexes() def test_index_2dsphere(self): db = self.db db.test.drop_indexes() - self.assertEqual("geo_2dsphere", - db.test.create_index([("geo", GEOSPHERE)])) + self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'geo' and idx_type == '2dsphere': + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": break else: self.fail("2dsphere index not found.") - poly = {"type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} query = {"geo": {"$within": {"$geometry": poly}}} # This query will error without a 2dsphere index. 
@@ -519,12 +500,11 @@ def test_index_2dsphere(self): def test_index_hashed(self): db = self.db db.test.drop_indexes() - self.assertEqual("a_hashed", - db.test.create_index([("a", HASHED)])) + self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'a' and idx_type == 'hashed': + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": break else: self.fail("hashed index not found.") @@ -534,40 +514,25 @@ def test_index_hashed(self): def test_index_sparse(self): db = self.db db.test.drop_indexes() - db.test.create_index([('key', ASCENDING)], sparse=True) - self.assertTrue(db.test.index_information()['key_1']['sparse']) + db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue(db.test.index_information()["key_1"]["sparse"]) def test_index_background(self): db = self.db db.test.drop_indexes() - db.test.create_index([('keya', ASCENDING)]) - db.test.create_index([('keyb', ASCENDING)], background=False) - db.test.create_index([('keyc', ASCENDING)], background=True) - self.assertFalse('background' in db.test.index_information()['keya_1']) - self.assertFalse(db.test.index_information()['keyb_1']['background']) - self.assertTrue(db.test.index_information()['keyc_1']['background']) + db.test.create_index([("keya", ASCENDING)]) + db.test.create_index([("keyb", ASCENDING)], background=False) + db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertFalse("background" in db.test.index_information()["keya_1"]) + self.assertFalse(db.test.index_information()["keyb_1"]["background"]) + self.assertTrue(db.test.index_information()["keyc_1"]["background"]) def _drop_dups_setup(self, db): - db.drop_collection('test') - db.test.insert_one({'i': 1}) - db.test.insert_one({'i': 2}) - db.test.insert_one({'i': 2}) # duplicate - db.test.insert_one({'i': 3}) - - @client_context.require_version_max(2, 6) - def test_index_drop_dups(self): - # Try dropping duplicates - db = self.db - self._drop_dups_setup(db) - - # No error, just drop the duplicate - db.test.create_index([('i', ASCENDING)], unique=True, dropDups=True) - - # Duplicate was dropped - self.assertEqual(3, db.test.count_documents({})) - - # Index was created, plus the index on _id - self.assertEqual(2, len(db.test.index_information())) + db.drop_collection("test") + db.test.insert_one({"i": 1}) + db.test.insert_one({"i": 2}) + db.test.insert_one({"i": 2}) # duplicate + db.test.insert_one({"i": 3}) def test_index_dont_drop_dups(self): # Try *not* dropping duplicates @@ -576,11 +541,8 @@ def test_index_dont_drop_dups(self): # There's a duplicate def test_create(): - db.test.create_index( - [('i', ASCENDING)], - unique=True, - dropDups=False - ) + db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + self.assertRaises(DuplicateKeyError, test_create) # Duplicate wasn't dropped @@ -591,79 +553,76 @@ def test_create(): # Get the plan dynamically because the explain format will change. def get_plan_stage(self, root, stage): - if root.get('stage') == stage: + if root.get("stage") == stage: return root elif "inputStage" in root: - return self.get_plan_stage(root['inputStage'], stage) + return self.get_plan_stage(root["inputStage"], stage) elif "inputStages" in root: - for i in root['inputStages']: + for i in root["inputStages"]: stage = self.get_plan_stage(i, stage) if stage: return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. 
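+ # On those servers the winning plan nests the classic stage tree one level + # deeper, so recurse into it.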
+ return self.get_plan_stage(root["queryPlan"], stage) elif "shards" in root: - for i in root['shards']: - stage = self.get_plan_stage(i['winningPlan'], stage) + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) if stage: return stage return {} - @client_context.require_version_min(3, 1, 9, -1) def test_index_filter(self): db = self.db db.drop_collection("test") # Test bad filter spec on create. - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression=5) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"x": {"$asdasd": 3}}) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"$and": 5}) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={ - "$and": [{"$and": [{"x": {"$lt": 2}}, - {"x": {"$gt": 0}}]}, - {"x": {"$exists": True}}]}) - - self.assertEqual("x_1", db.test.create_index( - [('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}})) + self.assertRaises(OperationFailure, db.test.create_index, "x", partialFilterExpression=5) + self.assertRaises( + OperationFailure, + db.test.create_index, + "x", + partialFilterExpression={"x": {"$asdasd": 3}}, + ) + self.assertRaises( + OperationFailure, db.test.create_index, "x", partialFilterExpression={"$and": 5} + ) + + self.assertEqual( + "x_1", + db.test.create_index([("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}), + ) db.test.insert_one({"x": 5, "a": 2}) db.test.insert_one({"x": 6, "a": 1}) # Operations that use the partial index. explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) # Operations that do not use the partial index. explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) explain = db.test.find({"x": 6}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) # Test drop_indexes. 
db.test.drop_index("x_1") explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) def test_field_selection(self): @@ -725,9 +684,7 @@ def test_options(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) result = db.test.options() - # mongos 2.2.x adds an $auth field when auth is enabled. - result.pop('$auth', None) - self.assertEqual(result, {"capped": True, 'size': 4096}) + self.assertEqual(result, {"capped": True, "size": 4096}) db.drop_collection("test") def test_insert_one(self): @@ -752,19 +709,16 @@ def test_insert_one(self): self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) self.assertEqual(2, db.test.count_documents({})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertTrue(isinstance(result.inserted_id, ObjectId)) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... - wait_until(lambda: 2 == db.test.count_documents({}), - 'forcing duplicate key error') + wait_until(lambda: db.test.count_documents({}) == 2, "forcing duplicate key error") - document = RawBSONDocument( - encode({'_id': ObjectId(), 'foo': 'bar'})) + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(result.inserted_id, None) @@ -773,7 +727,7 @@ def test_insert_many(self): db = self.db db.test.drop() - docs = [{} for _ in range(5)] + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) @@ -782,7 +736,7 @@ def test_insert_many(self): _id = doc["_id"] self.assertTrue(isinstance(_id, ObjectId)) self.assertTrue(_id in result.inserted_ids) - self.assertEqual(1, db.test.count_documents({'_id': _id})) + self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] @@ -797,21 +751,48 @@ def test_insert_many(self): self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) - docs = [RawBSONDocument(encode({"_id": i + 5})) - for i in range(5)] + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) self.assertEqual([], result.inserted_ids) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) - docs = [{} for _ in range(5)] + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertFalse(result.acknowledged) self.assertEqual(20, db.test.count_documents({})) + def test_insert_many_generator(self): + coll = self.db.test + coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = coll.insert_many(gen()) + self.assertEqual(5, 
len(result.inserted_ids)) + + def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many({}) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many([]) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(1) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) + def test_delete_one(self): self.db.test.drop() @@ -831,13 +812,12 @@ def test_delete_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, self.db.test.count_documents({})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_one({"z": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until(lambda: 0 == db.test.count_documents({}), 'delete 1 documents') + wait_until(lambda: db.test.count_documents({}) == 0, "delete 1 documents") def test_delete_many(self): self.db.test.drop() @@ -853,53 +833,60 @@ def test_delete_many(self): self.assertTrue(result.acknowledged) self.assertEqual(0, self.db.test.count_documents({"x": 1})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_many({"y": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until( - lambda: 0 == db.test.count_documents({}), 'delete 2 documents') + wait_until(lambda: db.test.count_documents({}) == 0, "delete 2 documents") def test_command_document_too_large(self): - large = '*' * (self.client.max_bson_size + _COMMAND_OVERHEAD) + large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) coll = self.db.test - self.assertRaises( - DocumentTooLarge, coll.insert_one, {'data': large}) + self.assertRaises(DocumentTooLarge, coll.insert_one, {"data": large}) # update_one and update_many are the same + self.assertRaises(DocumentTooLarge, coll.replace_one, {}, {"data": large}) + self.assertRaises(DocumentTooLarge, coll.delete_one, {"data": large}) + + def test_write_large_document(self): + max_size = client_context.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + self.assertRaises(OperationFailure, self.db.test.insert_one, {"foo": max_str}) self.assertRaises( - DocumentTooLarge, coll.replace_one, {}, {'data': large}) + OperationFailure, self.db.test.replace_one, {}, {"foo": max_str}, upsert=True + ) + self.assertRaises(OperationFailure, self.db.test.insert_many, [{"x": 1}, {"foo": max_str}]) + self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) self.assertRaises( - DocumentTooLarge, coll.delete_one, {'data': large}) + DocumentTooLarge, unack_coll.replace_one, {"bar": "x"}, {"bar": "x" * (max_size - 14)} + ) + self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 
32)}) - @client_context.require_version_min(3, 1, 9, -1) def test_insert_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test insert_one - self.assertRaises(OperationFailure, db.test.insert_one, - {"_id": 1, "x": 100}) - result = db.test.insert_one({"_id": 1, "x": 100}, - bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.insert_one, {"_id": 1, "x": 100}) + result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(1, result.inserted_id) - result = db.test.insert_one({"_id":2, "a":0}) + result = db.test.insert_one({"_id": 2, "a": 0}) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(2, result.inserted_id) - if client_context.version < (3, 6): - # Uses OP_INSERT which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.insert_one, - {"y": 1}, bypass_document_validation=True) - else: - db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1}), - "find w:0 inserted document") + db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"y": 1}), "find w:0 inserted document") # Test insert_many docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] @@ -924,26 +911,25 @@ def test_insert_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) - self.assertRaises(OperationFailure, db_w0.test.insert_many, - [{"x": 1}, {"x": 2}], - bypass_document_validation=True) + self.assertRaises( + OperationFailure, + db_w0.test.insert_many, + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) - @client_context.require_version_min(3, 1, 9, -1) def test_replace_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test replace_one db.test.insert_one({"a": 101}) - self.assertRaises(OperationFailure, db.test.replace_one, - {"a": 101}, {"y": 1}) + self.assertRaises(OperationFailure, db.test.replace_one, {"a": 101}, {"y": 1}) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"a": 101})) - db.test.replace_one({"a": 101}, {"y": 1}, - bypass_document_validation=True) + db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"a": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) db.test.replace_one({"y": 1}, {"a": 102}) @@ -952,143 +938,107 @@ def test_replace_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": 102})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.replace_one, - {"y": 1}, {"x": 101}) + self.assertRaises(OperationFailure, db.test.replace_one, {"y": 1}, {"x": 101}) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) - db.test.replace_one({"y": 1}, {"x": 101}, - 
bypass_document_validation=True) + db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"x": 101})) - db.test.replace_one({"x": 101}, {"a": 103}, - bypass_document_validation=False) + db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"a": 103})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.replace_one, - {"y": 1}, {"x": 1}, - bypass_document_validation=True) - else: - db_w0.test.replace_one({"y": 1}, {"x": 1}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), - "find w:0 replaced document") + db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") - @client_context.require_version_min(3, 1, 9, -1) def test_update_bypass_document_validation(self): db = self.db db.test.drop() db.test.insert_one({"z": 5}) - db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test update_one - self.assertRaises(OperationFailure, db.test.update_one, - {"z": 5}, {"$inc": {"z": -10}}) + self.assertRaises(OperationFailure, db.test.update_one, {"z": 5}, {"$inc": {"z": -10}}) self.assertEqual(0, db.test.count_documents({"z": -5})) self.assertEqual(1, db.test.count_documents({"z": 5})) - db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, - bypass_document_validation=True) + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"z": 5})) self.assertEqual(1, db.test.count_documents({"z": -5})) - db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, - bypass_document_validation=False) + db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"z": -5})) - db.test.insert_one({"z": -10}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_one, - {"z": -10}, {"$inc": {"z": 1}}) + db.test.insert_one({"z": -10}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_one, {"z": -10}, {"$inc": {"z": 1}}) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": -10})) - db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, - bypass_document_validation=True) + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) self.assertEqual(1, db.test.count_documents({"z": -9})) self.assertEqual(0, db.test.count_documents({"z": -10})) - db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, - bypass_document_validation=False) + db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": 0})) db.test.insert_one({"y": 1, "x": 0}, 
bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.update_one, - {"y": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - else: - db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), - "find w:0 updated document") + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), "find w:0 updated document") # Test update_many db.test.insert_many([{"z": i} for i in range(3, 101)]) - db.test.insert_one({"y": 0}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, {}, - {"$inc": {"z": -100}}) + db.test.insert_one({"y": 0}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": -100}}) self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$gt": -50}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) - db.test.insert_many([{"z": -i} for i in range(50)], - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, - {}, {"$inc": {"z": 1}}) + db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": 1}}) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$lte": 0}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. 
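The bypass_document_validation tests above and below all reduce to one pattern. A minimal sketch of that pattern, assuming a local mongod; the client and the "demo"/"scores" namespace are illustrative and not part of this diff:

from pymongo import MongoClient
from pymongo.errors import OperationFailure

client = MongoClient()  # assumes a local mongod
db = client.demo
db.drop_collection("scores")
db.create_collection("scores", validator={"score": {"$gte": 0}})

try:
    db.scores.insert_one({"score": -1})  # rejected by the collection validator
except OperationFailure:
    pass

# The same write succeeds when the validator is explicitly bypassed.
db.scores.insert_one({"score": -1}, bypass_document_validation=True)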
- self.assertRaises(OperationFailure, db_w0.test.update_many, - {"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - else: - db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until( - lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, - "find w:0 updated documents") + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + wait_until( + lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, "find w:0 updated documents" + ) - @client_context.require_version_min(3, 1, 9, -1) def test_bypass_document_validation_bulk_write(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$gte": 0}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) - - ops = [InsertOne({"a": -10}), - InsertOne({"a": -11}), - InsertOne({"a": -12}), - UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - ReplaceOne({"a": {"$lte": -10}}, {"a": -1})] + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] db.test.bulk_write(ops, bypass_document_validation=True) self.assertEqual(3, db.test.count_documents({})) @@ -1100,22 +1050,22 @@ def test_bypass_document_validation_bulk_write(self): for op in ops: self.assertRaises(BulkWriteError, db.test.bulk_write, [op]) - self.assertRaises(OperationFailure, db_w0.test.bulk_write, ops, - bypass_document_validation=True) + self.assertRaises( + OperationFailure, db_w0.test.bulk_write, ops, bypass_document_validation=True + ) def test_find_by_default_dct(self): db = self.db - db.test.insert_one({'foo': 'bar'}) - dct = defaultdict(dict, [('foo', 'bar')]) + db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] self.assertIsNotNone(db.test.find_one(dct)) - self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) def test_find_w_fields(self): db = self.db db.test.delete_many({}) - db.test.insert_one({"x": 1, "mike": "awesome", - "extra thing": "abcdefghijklmnopqrstuvwxyz"}) + db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) self.assertEqual(1, db.test.count_documents({})) doc = next(db.test.find({})) self.assertTrue("x" in doc) @@ -1136,6 +1086,7 @@ def test_find_w_fields(self): doc = next(db.test.find({}, ["mike"])) self.assertFalse("extra thing" in doc) + @no_type_check def test_fields_specifier_as_dict(self): db = self.db db.test.delete_many({}) @@ -1143,9 +1094,7 @@ def test_fields_specifier_as_dict(self): db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) self.assertEqual([1, 2, 3], db.test.find_one()["x"]) - self.assertEqual([2, 3], - db.test.find_one( - projection={"x": {"$slice": -2}})["x"]) + self.assertEqual([2, 3], db.test.find_one(projection={"x": {"$slice": -2}})["x"]) self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) @@ -1159,14 +1108,10 @@ def test_find_w_regex(self): db.test.insert_one({"x": "hello_test"}) self.assertEqual(len(list(db.test.find())), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello.*")}))), 4) - 
self.assertEqual(len(list(db.test.find({"x": - re.compile("ello")}))), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello$")}))), 0) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello_mi.*$")}))), 2) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello.*")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("ello")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello$")}))), 0) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello_mi.*$")}))), 2) def test_id_can_be_anything(self): db = self.db @@ -1185,37 +1130,9 @@ def test_id_can_be_anything(self): self.assertEqual(obj["_id"], numeric) for x in db.test.find(): - self.assertEqual(x["hello"], u"world") + self.assertEqual(x["hello"], "world") self.assertTrue("_id" in x) - def test_invalid_key_names(self): - db = self.db - db.test.drop() - - db.test.insert_one({"hello": "world"}) - db.test.insert_one({"hello": {"hello": "world"}}) - - self.assertRaises(InvalidDocument, db.test.insert_one, - {"$hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"$hello": "world"}}) - - db.test.insert_one({"he$llo": "world"}) - db.test.insert_one({"hello": {"hello$": "world"}}) - - self.assertRaises(InvalidDocument, db.test.insert_one, - {".hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {".hello": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello.": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"hello.": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hel.lo": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"hel.lo": "world"}}) - def test_unique_index(self): db = self.db db.drop_collection("test") @@ -1258,103 +1175,74 @@ def test_write_error_text_handling(self): db.test.create_index("text", unique=True) # Test workaround for SERVER-24007 - data = (b'a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83') + data = ( + 
b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) text = utf_8_decode(data, None, True) db.test.insert_one({"text": text}) # Should raise DuplicateKeyError, not InvalidBSON - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"text": text}) - - self.assertRaises(DuplicateKeyError, - db.test.insert, - {"text": text}) - - self.assertRaises(DuplicateKeyError, - db.test.insert, - [{"text": text}]) - - self.assertRaises(DuplicateKeyError, - db.test.replace_one, - {"_id": ObjectId()}, - {"text": text}, - upsert=True) - - self.assertRaises(DuplicateKeyError, - db.test.update, - {"_id": ObjectId()}, - {"text": text}, - upsert=True) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"text": text}) + + self.assertRaises( + DuplicateKeyError, db.test.replace_one, {"_id": ObjectId()}, {"text": text}, upsert=True + ) # Should raise BulkWriteError, not InvalidBSON - self.assertRaises(BulkWriteError, - db.test.insert_many, - [{"text": text}]) + self.assertRaises(BulkWriteError, db.test.insert_many, [{"text": text}]) def test_write_error_unicode(self): coll = self.db.test self.addCleanup(coll.drop) - coll.create_index('a', unique=True) - coll.insert_one({'a': u'unicode \U0001f40d'}) - with self.assertRaisesRegex( - DuplicateKeyError, - 'E11000 duplicate key error') as ctx: - coll.insert_one({'a': u'unicode \U0001f40d'}) + coll.create_index("a", unique=True) + coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + coll.insert_one({"a": "unicode \U0001f40d"}) # Once more for good measure. - self.assertIn('E11000 duplicate key error', - str(ctx.exception)) - - if sys.version_info[0] == 2: - # Test unicode(error) conversion. - self.assertIn('E11000 duplicate key error', - unicode(ctx.exception)) - + self.assertIn("E11000 duplicate key error", str(ctx.exception)) def test_wtimeout(self): # Ensure setting wtimeout doesn't disable write concern altogether. # See SERVER-12596. 
collection = self.db.test collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(w=1, wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) def test_error_code(self): try: @@ -1380,16 +1268,13 @@ def test_index_on_subfield(self): db.test.insert_one({"hello": {"a": 4, "b": 5}}) db.test.insert_one({"hello": {"a": 7, "b": 2}}) - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"hello": {"a": 4, "b": 10}}) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"hello": {"a": 4, "b": 10}}) def test_replace_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.replace_one({}, {"$set": {"x": 1}})) + self.assertRaises(ValueError, lambda: db.test.replace_one({}, {"$set": {"x": 1}})) id1 = db.test.insert_one({"x": 1}).inserted_id result = db.test.replace_one({"x": 1}, {"y": 1}) @@ -1400,7 +1285,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 1})) self.assertEqual(0, db.test.count_documents({"x": 1})) - self.assertEqual(db.test.find_one(id1)["y"], 1) + self.assertEqual(db.test.find_one(id1)["y"], 1) # type: ignore replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = db.test.replace_one({"y": 1}, replacement, True) @@ -1411,7 +1296,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"y": 1})) - self.assertEqual(db.test.find_one(id1)["z"], 1) + self.assertEqual(db.test.find_one(id1)["z"], 1) # type: ignore result = db.test.replace_one({"x": 2}, {"y": 2}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1421,8 +1306,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.replace_one({"x": 0}, {"y": 0}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1434,8 +1318,7 @@ def test_update_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_one({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_one({}, {"x": 1})) id1 = db.test.insert_one({"x": 5}).inserted_id result = db.test.update_one({}, {"$inc": {"x": 1}}) @@ -1444,7 +1327,7 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 6) + self.assertEqual(db.test.find_one(id1)["x"], 6) # type: ignore id2 = db.test.insert_one({"x": 1}).inserted_id result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) @@ -1453,8 +1336,8 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) 
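These assertions all follow from the UpdateResult contract exercised here: acknowledged writes report counts, unacknowledged ones refuse to. A compact sketch (same local-mongod assumption as the earlier snippets):

from pymongo import MongoClient
from pymongo.errors import InvalidOperation
from pymongo.write_concern import WriteConcern

coll = MongoClient().demo.test
result = coll.update_one({"x": 6}, {"$inc": {"x": 1}})
print(result.matched_count, result.modified_count, result.upserted_id)

w0 = coll.with_options(write_concern=WriteConcern(w=0))
unacked = w0.update_one({"x": 0}, {"$inc": {"x": 1}})
try:
    unacked.matched_count  # w=0 results carry no server response
except InvalidOperation:
    pass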
self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) + self.assertEqual(db.test.find_one(id1)["x"], 7) # type: ignore + self.assertEqual(db.test.find_one(id2)["x"], 1) # type: ignore result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1463,8 +1346,7 @@ def test_update_one(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1476,8 +1358,7 @@ def test_update_many(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_many({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_many({}, {"x": 1})) db.test.insert_one({"x": 4, "y": 3}) db.test.insert_one({"x": 5, "y": 5}) @@ -1506,8 +1387,7 @@ def test_update_many(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1515,88 +1395,47 @@ def test_update_many(self): self.assertRaises(InvalidOperation, lambda: result.upserted_id) self.assertFalse(result.acknowledged) - # MongoDB >= 3.5.8 allows dotted fields in updates - @client_context.require_version_max(3, 5, 7) - def test_update_with_invalid_keys(self): - self.db.drop_collection("test") - self.assertTrue(self.db.test.insert_one({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - - # Replace - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"hello": "world"}, doc) - # Upsert - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"foo": "bar"}, doc, upsert=True) - - # Check that the last two ops didn't actually modify anything - self.assertTrue('a.b' not in self.db.test.find_one()) - def test_update_check_keys(self): self.db.drop_collection("test") self.assertTrue(self.db.test.insert_one({"hello": "world"})) # Modify shouldn't check keys... - self.assertTrue(self.db.test.update_one({"hello": "world"}, - {"$set": {"foo.bar": "baz"}}, - upsert=True)) + self.assertTrue( + self.db.test.update_one({"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True) + ) # I know this seems like testing the server but I'd like to be notified # by CI if the server's behavior changes here. doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) - self.assertRaises(OperationFailure, self.db.test.update_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.update_one, {"hello": "world"}, doc, upsert=True + ) # This is going to cause keys to be checked and raise InvalidDocument. # That's OK assuming the server's behavior in the previous assert # doesn't change. If the behavior changes checking the first key for # '$' in update won't be good enough anymore. 
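The '$' check the comment refers to is client-side: PyMongo classifies the second argument as a replacement document or an update document by inspecting its first key, roughly as follows (the namespace is illustrative):

from pymongo import MongoClient

coll = MongoClient().demo.test
coll.update_one({"x": 1}, {"$set": {"y": 2}})  # update document: fine
coll.replace_one({"x": 1}, {"y": 2})           # replacement document: fine
# Mixing them up raises ValueError before anything reaches the server:
# coll.update_one({"x": 1}, {"y": 2})            -> ValueError
# coll.replace_one({"x": 1}, {"$set": {"y": 2}}) -> ValueError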
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.replace_one, {"hello": "world"}, doc, upsert=True + ) # Replace with empty document - self.assertNotEqual(0, - self.db.test.replace_one( - {"hello": "world"}, {}).matched_count) + self.assertNotEqual(0, self.db.test.replace_one({"hello": "world"}, {}).matched_count) def test_acknowledged_delete(self): db = self.db db.drop_collection("test") - db.create_collection("test", capped=True, size=1000) - - db.test.insert_one({"x": 1}) - self.assertEqual(1, db.test.count_documents({})) - - # Can't remove from capped collection. - self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1}) - db.drop_collection("test") - db.test.insert_one({"x": 1}) - db.test.insert_one({"x": 1}) + db.test.insert_many([{"x": 1}, {"x": 1}]) self.assertEqual(2, db.test.delete_many({}).deleted_count) self.assertEqual(0, db.test.delete_many({}).deleted_count) + @client_context.require_version_max(4, 9) def test_manual_last_error(self): coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) coll.insert_one({"x": 1}) self.db.command("getlasterror", w=1, wtimeout=1) - @ignore_deprecations - def test_count(self): - db = self.db - db.drop_collection("test") - - self.assertEqual(db.test.count(), 0) - db.test.insert_many([{}, {}]) - self.assertEqual(db.test.count(), 2) - db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}]) - self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1) - self.assertEqual(db.test.count({'foo': 'bar'}), 1) - self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2) - self.assertEqual( - db.test.count({'foo': re.compile(r'ba.*')}), 2) - def test_count_documents(self): db = self.db db.drop_collection("test") @@ -1607,10 +1446,9 @@ def test_count_documents(self): self.assertEqual(db.test.count_documents({}), 0) db.test.insert_many([{}, {}]) self.assertEqual(db.test.count_documents({}), 2) - db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}]) - self.assertEqual(db.test.count_documents({'foo': 'bar'}), 1) - self.assertEqual( - db.test.count_documents({'foo': re.compile(r'ba.*')}), 2) + db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) def test_estimated_document_count(self): db = self.db @@ -1626,54 +1464,40 @@ def test_estimated_document_count(self): def test_aggregate(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} - # MongoDB 3.5.1+ requires either the 'cursor' or 'explain' options. - if client_context.version.at_least(3, 5, 1): - result = db.test.aggregate([pipeline]) - else: - result = db.test.aggregate([pipeline], useCursor=False) - + result = db.test.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) - self.assertEqual([{'foo': [1, 2]}], list(result)) + self.assertEqual([{"foo": [1, 2]}], list(result)) # Test write concern. 
with self.write_concern_collection() as coll: - coll.aggregate([{'$out': 'output-collection'}]) + coll.aggregate([{"$out": "output-collection"}]) def test_aggregate_raw_bson(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} - coll = db.get_collection( - 'test', - codec_options=CodecOptions(document_class=RawBSONDocument)) - # MongoDB 3.5.1+ requires either the 'cursor' or 'explain' options. - if client_context.version.at_least(3, 5, 1): - result = coll.aggregate([pipeline]) - else: - result = coll.aggregate([pipeline], useCursor=False) + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = coll.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) - self.assertEqual([1, 2], list(first_result['foo'])) + self.assertEqual([1, 2], list(first_result["foo"])) def test_aggregation_cursor_validation(self): db = self.db - projection = {'$project': {'_id': '$_id'}} + projection = {"$project": {"_id": "$_id"}} cursor = db.test.aggregate([projection], cursor={}) self.assertTrue(isinstance(cursor, CommandCursor)) - cursor = db.test.aggregate([projection], useCursor=True) - self.assertTrue(isinstance(cursor, CommandCursor)) - def test_aggregation_cursor(self): db = self.db if client_context.has_secondaries: @@ -1681,106 +1505,63 @@ def test_aggregation_cursor(self): db = self.client.get_database( db.name, read_preference=ReadPreference.SECONDARY, - write_concern=WriteConcern(w=self.w)) + write_concern=WriteConcern(w=self.w), + ) for collection_size in (10, 1000): db.drop_collection("test") - db.test.insert_many([{'_id': i} for i in range(collection_size)]) + db.test.insert_many([{"_id": i} for i in range(collection_size)]) expected_sum = sum(range(collection_size)) # Use batchSize to ensure multiple getMore messages - cursor = db.test.aggregate( - [{'$project': {'_id': '$_id'}}], - batchSize=5) + cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) - self.assertEqual( - expected_sum, - sum(doc['_id'] for doc in cursor)) + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor)) # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) - self.assertEqual(5, len(cursor._CommandCursor__data)) + self.assertEqual(5, len(cursor._CommandCursor__data)) # type: ignore # Force a getMore - cursor._CommandCursor__data.clear() + cursor._CommandCursor__data.clear() # type: ignore next(cursor) # batchSize - 1 - self.assertEqual(4, len(cursor._CommandCursor__data)) + self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore # Exhaust the cursor. There shouldn't be any errors. 
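The private _CommandCursor__data peeking in this test is just observing batching: with batchSize=5 the first server reply carries at most five documents, and each subsequent getMore fetches five more. The public-API view of the same behavior:

from pymongo import MongoClient

db = MongoClient().demo
db.test.drop()
db.test.insert_many([{"_id": i} for i in range(12)])
cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5)
# Iteration transparently issues getMore commands after each 5-document batch.
assert sum(doc["_id"] for doc in cursor) == sum(range(12))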
- for doc in cursor: + for _doc in cursor: pass def test_aggregation_cursor_alive(self): self.db.test.delete_many({}) self.db.test.insert_many([{} for _ in range(3)]) self.addCleanup(self.db.test.delete_many, {}) - cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2}) + cursor = self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) n = 0 while True: cursor.next() n += 1 - if 3 == n: + if n == 3: self.assertFalse(cursor.alive) break self.assertTrue(cursor.alive) - @client_context.require_no_mongos - @client_context.require_version_max(4, 1, 0) - @ignore_deprecations - def test_parallel_scan(self): - db = self.db - db.drop_collection("test") - if client_context.has_secondaries: - # Test that getMore messages are sent to the right server. - db = self.client.get_database( - db.name, - read_preference=ReadPreference.SECONDARY, - write_concern=WriteConcern(w=self.w)) - - coll = db.test - coll.insert_many([{'_id': i} for i in range(8000)]) - docs = [] - threads = [threading.Thread(target=docs.extend, args=(cursor,)) - for cursor in coll.parallel_scan(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - self.assertEqual( - set(range(8000)), - set(doc['_id'] for doc in docs)) + def test_invalid_session_parameter(self): + def try_invalid_session(): + with self.db.test.aggregate([], {}): # type:ignore + pass - @client_context.require_no_mongos - @client_context.require_version_min(3, 3, 10) - @client_context.require_version_max(4, 1, 0) - @client_context.require_test_commands - @ignore_deprecations - def test_parallel_scan_max_time_ms(self): - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") - try: - self.assertRaises(ExecutionTimeout, - self.db.test.parallel_scan, - 3, - maxTimeMS=1) - finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.assertRaisesRegex(ValueError, "must be a ClientSession", try_invalid_session) def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") - db.test_large_limit.create_index([('x', 1)]) + db.test_large_limit.create_index([("x", 1)]) my_str = "mongomongo" * 1000 - db.test_large_limit.insert_many( - {"x": i, "y": my_str} for i in range(2000)) + db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) i = 0 y = 0 - for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]): + for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): i += 1 y += doc["x"] @@ -1834,8 +1615,9 @@ def test_rename(self): db.foo.rename("test", dropTarget=True) with self.write_concern_collection() as coll: - coll.rename('foo') + coll.rename("foo") + @no_type_check def test_find_one(self): db = self.db db.drop_collection("test") @@ -1846,8 +1628,7 @@ def test_find_one(self): self.assertEqual(db.test.find_one(_id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) self.assertEqual(db.test.find_one({}), db.test.find_one()) - self.assertEqual(db.test.find_one({"hello": "world"}), - db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) self.assertTrue("hello" in db.test.find_one(projection=["hello"])) self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) @@ -1855,13 +1636,15 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=("hello",))) self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in db.test.find_one(projection=set(["hello"]))) - 
self.assertTrue("hello" not in db.test.find_one(projection=set(["foo"]))) + self.assertTrue("hello" in db.test.find_one(projection={"hello"})) + self.assertTrue("hello" not in db.test.find_one(projection={"foo"})) self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) - self.assertEqual(["_id"], list(db.test.find_one(projection=[]))) + self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) + self.assertTrue("hello" in list(db.test.find_one(projection={}))) + self.assertTrue("hello" in list(db.test.find_one(projection=[]))) self.assertEqual(None, db.test.find_one({"hello": "foo"})) self.assertEqual(None, db.test.find_one(ObjectId())) @@ -1903,7 +1686,7 @@ def to_list(things): self.assertRaises(TypeError, db.test.find, sort=5) self.assertRaises(TypeError, db.test.find, sort="hello") - self.assertRaises(ValueError, db.test.find, sort=["hello", 1]) + self.assertRaises(TypeError, db.test.find, sort=["hello", 1]) # TODO doesn't actually test functionality, just that it doesn't blow up def test_cursor_timeout(self): @@ -1912,16 +1695,13 @@ def test_cursor_timeout(self): def test_exhaust(self): if is_mongos(self.db.client): - self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST) + self.assertRaises(InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST) return # Limit is incompatible with exhaust. - self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST, - limit=5) + self.assertRaises( + InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST, limit=5 + ) cur = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertRaises(InvalidOperation, cur.limit, 5) cur = self.db.test.find(limit=5) @@ -1932,36 +1712,45 @@ def test_exhaust(self): self.db.drop_collection("test") # Insert enough documents to require more than one batch - self.db.test.insert_many([{'i': i} for i in range(150)]) + self.db.test.insert_many([{"i": i} for i in range(150)]) client = rs_or_single_client(maxPoolSize=1) - socks = get_pool(client).sockets + self.addCleanup(client.close) + pool = get_pool(client) # Make sure the socket is returned after exhaustion. cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) next(cur) - self.assertEqual(0, len(socks)) + self.assertEqual(0, len(pool.conns)) for _ in cur: pass - self.assertEqual(1, len(socks)) + self.assertEqual(1, len(pool.conns)) # Same as previous but don't call next() for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): pass - self.assertEqual(1, len(socks)) - - # If the Cursor instance is discarded before being - # completely iterated we have to close and - # discard the socket. - cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) - next(cur) - self.assertEqual(0, len(socks)) - if sys.platform.startswith('java') or 'PyPy' in sys.version: + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. 
+ for _ in range(3): + next(cur) + else: + next(cur) + self.assertEqual(0, len(pool.conns)) + if sys.platform.startswith("java") or "PyPy" in sys.version: # Don't wait for GC or use gc.collect(), it's unreliable. cur.close() cur = None + # Wait until the background thread returns the socket. + wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. - self.assertEqual(0, len(socks)) + self.assertEqual(0, len(pool.conns)) def test_distinct(self): self.db.drop_collection("test") @@ -1974,11 +1763,11 @@ def test_distinct(self): self.assertEqual([1, 2, 3], distinct) - distinct = test.find({'a': {'$gt': 1}}).distinct("a") + distinct = test.find({"a": {"$gt": 1}}).distinct("a") distinct.sort() self.assertEqual([2, 3], distinct) - distinct = test.distinct('a', {'a': {'$gt': 1}}) + distinct = test.distinct("a", {"a": {"$gt": 1}}) distinct.sort() self.assertEqual([2, 3], distinct) @@ -1999,137 +1788,110 @@ def test_query_on_query_field(self): self.db.test.insert_one({"query": "foo"}) self.db.test.insert_one({"bar": "foo"}) - self.assertEqual(1, - self.db.test.count_documents({"query": {"$ne": None}})) - self.assertEqual(1, - len(list(self.db.test.find({"query": {"$ne": None}}))) - ) + self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(list(self.db.test.find({"query": {"$ne": None}})))) def test_min_query(self): self.db.drop_collection("test") self.db.test.insert_many([{"x": 1}, {"x": 2}]) self.db.test.create_index("x") - cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint("x_1") + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") docs = list(cursor) self.assertEqual(1, len(docs)) self.assertEqual(2, docs[0]["x"]) def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. + # Ensure we don't exceed server's maxWriteBatchSize size limit. self.db.test.drop() - n_docs = 2100 + n_docs = client_context.max_write_batch_size + 100 self.db.test.insert_many([{} for _ in range(n_docs)]) self.assertEqual(n_docs, self.db.test.count_documents({})) self.db.test.drop() - def test_map_reduce(self): - db = self.db - db.drop_collection("test") + def test_insert_many_large_batch(self): + # Tests legacy insert. + db = self.client.test_insert_large_batch + self.addCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + db.collection_0.insert_many(successful_insert) + self.assertEqual(4, db.collection_0.count_documents({})) + + db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] + + with self.assertRaises(BulkWriteError): + db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, db.collection_1.count_documents({})) + + db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. 
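The ordered/unordered split drives all four scenarios in this test: ordered batches stop at the first error, unordered batches attempt every document and aggregate the failures. A small sketch of the unordered case:

from pymongo import MongoClient
from pymongo.errors import BulkWriteError

coll = MongoClient().demo.test
coll.drop()
docs = [{"_id": 1}, {"_id": 1}, {"_id": 2}]
try:
    coll.insert_many(docs, ordered=False)
except BulkWriteError as exc:
    # Only the duplicate failed; {"_id": 2} was still inserted.
    print(exc.details["writeErrors"])
# ordered=True (the default) would have stopped at the first error instead.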
+ unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_second_fails) + wait_until( + lambda: db.collection_2.count_documents({}) == 1, "insert 1 document", timeout=60 + ) - db.test.insert_one({"id": 1, "tags": ["dog", "cat"]}) - db.test.insert_one({"id": 2, "tags": ["cat"]}) - db.test.insert_one({"id": 3, "tags": ["mouse", "cat", "dog"]}) - db.test.insert_one({"id": 4, "tags": []}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - - db.test.insert_one({"id": 5, "tags": ["hampster"]}) - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - db.test.delete_one({"id": 5}) - - result = db.test.map_reduce(map, reduce, - out={'merge': 'mrunittests'}) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce(map, reduce, - out={'reduce': 'mrunittests'}) - - self.assertEqual(6, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(4, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(2, result.find_one({"_id": "mouse"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce( - map, - reduce, - out={'replace': 'mrunittests'} + db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, db.collection_3.count_documents({})) + + db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_two_failures, ordered=False) + + # Only the first and third documents are inserted. + wait_until( + lambda: db.collection_4.count_documents({}) == 2, "insert 2 documents", timeout=60 ) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - - # Create the output database. 
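The map/reduce tag counter deleted in this hunk has a natural aggregation-pipeline equivalent; this is one possible translation, not part of the diff itself:

from pymongo import MongoClient

db = MongoClient().demo
pipeline = [
    {"$unwind": "$tags"},
    {"$group": {"_id": "$tags", "value": {"$sum": 1}}},  # mirrors emit(z, 1) plus the summing reduce
    {"$out": "mrunittests"},
]
db.test.aggregate(pipeline)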
- db.client.mrtestdb.mrunittests.insert_one({}) - result = db.test.map_reduce(map, reduce, - out=SON([('replace', 'mrunittests'), - ('db', 'mrtestdb') - ])) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - self.client.drop_database('mrtestdb') - - full_result = db.test.map_reduce(map, reduce, - out='mrunittests', full_response=True) - self.assertEqual('mrunittests', full_result["result"]) - if client_context.version < (4, 3): - self.assertEqual(6, full_result["counts"]["emit"]) - - result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2) - self.assertEqual(2, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(None, result.find_one({"_id": "mouse"})) - - result = db.test.map_reduce(map, reduce, out={'inline': 1}) - self.assertTrue(isinstance(result, dict)) - self.assertTrue('results' in result) - self.assertTrue(result['results'][1]["_id"] in ("cat", - "dog", - "mouse")) - - result = db.test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(3, len(result)) - self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse")) - - full_result = db.test.inline_map_reduce(map, reduce, - full_response=True) - self.assertEqual(3, len(full_result["results"])) - if client_context.version < (4, 3): - self.assertEqual(6, full_result["counts"]["emit"]) - with self.write_concern_collection() as coll: - coll.map_reduce(map, reduce, 'output') + db.collection_4.drop() def test_messages_with_unicode_collection_names(self): db = self.db - db[u"Employés"].insert_one({"x": 1}) - db[u"Employés"].replace_one({"x": 1}, {"x": 2}) - db[u"Employés"].delete_many({}) - db[u"Employés"].find_one() - list(db[u"Employés"].find()) + db["Employés"].insert_one({"x": 1}) + db["Employés"].replace_one({"x": 1}, {"x": 2}) + db["Employés"].delete_many({}) + db["Employés"].find_one() + list(db["Employés"].find()) def test_drop_indexes_non_existent(self): self.db.drop_collection("test") @@ -2146,252 +1908,249 @@ class BadGetAttr(dict): def __getattr__(self, name): pass - bad = BadGetAttr([('foo', 'bar')]) - c.insert_one({'bad': bad}) - self.assertEqual('bar', c.find_one()['bad']['foo']) - - @client_context.require_version_max(3, 5, 5) - def test_array_filters_unsupported(self): - c = self.db.test - with self.assertRaises(ConfigurationError): - c.update_one( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - with self.assertRaises(ConfigurationError): - c.update_many( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - with self.assertRaises(ConfigurationError): - c.find_one_and_update( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) + bad = BadGetAttr([("foo", "bar")]) + c.insert_one({"bad": bad}) + self.assertEqual("bar", c.find_one()["bad"]["foo"]) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. 
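array_filters names a placeholder used by filtered positional operators in the update document; the validation tests below only check the client-side type. Typical use looks like this (local mongod and "demo" namespace assumed, as before):

from pymongo import MongoClient

coll = MongoClient().demo.test
coll.insert_one({"y": [{"b": 1}, {"b": 5}]})
# Zero out only the array elements matched by the placeholder filter (b == 1).
coll.update_one({}, {"$set": {"y.$[i].b": 0}}, array_filters=[{"i.b": 1}])
# On a w=0 collection the option cannot be acknowledged and raises ConfigurationError.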
c = self.db.test with self.assertRaises(TypeError): - c.update_one({}, {'$set': {'a': 1}}, array_filters={}) + c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.update_many({}, {'$set': {'a': 1}}, array_filters={}) + c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) + update = {"$set": {"a": 1}} + c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) with self.assertRaises(ConfigurationError): - c_w0.update_one({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.update_many({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.find_one_and_update({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.find_one_and_update({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) def test_find_one_and(self): c = self.db.test c.drop() - c.insert_one({'_id': 1, 'i': 1}) - - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_delete({'_id': 1})) - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_replace( - {'_id': 1}, {'i': 3, 'j': 1}, - projection=['i'], - return_document=ReturnDocument.AFTER)) - self.assertEqual({'i': 4}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - projection={'i': 1, '_id': 0}, - return_document=ReturnDocument.AFTER)) + c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual({"_id": 1, "i": 1}, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, c.find_one({"_id": 1})) + + self.assertEqual(None, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + 
return_document=ReturnDocument.AFTER, + ), + ) c.drop() for j in range(5): - c.insert_one({'j': j, 'i': 0}) + c.insert_one({"j": j, "i": 0}) - sort = [('j', DESCENDING)] - self.assertEqual(4, c.find_one_and_update({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) + sort = [("j", DESCENDING)] + self.assertEqual(4, c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort)["j"]) def test_find_one_and_write_concern(self): listener = EventListener() db = single_client(event_listeners=[listener])[self.db.name] # non-default WriteConcern. - c_w0 = db.get_collection( - 'test', write_concern=WriteConcern(w=0)) + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. - c_default = db.get_collection('test', write_concern=WriteConcern()) - results = listener.results + c_default = db.get_collection("test", write_concern=WriteConcern()) # Authenticate the client and throw out auth commands from the listener. - db.command('ismaster') - results.clear() - if client_context.version.at_least(3, 1, 9, -1): - c_w0.find_and_modify( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - - c_w0.find_one_and_update( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - - c_w0.find_one_and_delete({'_id': 1}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - - # Test write concern errors. - if client_context.is_rs: - c_wc_error = db.get_collection( - 'test', - write_concern=WriteConcern( - w=len(client_context.nodes) + 1)) - self.assertRaises( - WriteConcernError, - c_wc_error.find_and_modify, - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_update, - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_replace, - {'w': 0}, results['started'][0].command['writeConcern']) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_delete, - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - else: - c_w0.find_and_modify( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_w0.find_one_and_update( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_w0.find_one_and_delete({'_id': 1}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_default.find_and_modify({'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() + db.command("ping") + listener.reset() + c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + 
listener.reset() + + # Test write concern errors. + if client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_update, + {"_id": 1}, + {"$set": {"foo": "bar"}}, + ) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_replace, + {"w": 0}, + listener.started_events[0].command["writeConcern"], + ) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_delete, + {"w": 0}, + listener.started_events[0].command["writeConcern"], + ) + listener.reset() - c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() + c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() - c_default.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() + c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() - c_default.find_one_and_delete({'_id': 1}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() + c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() def test_find_with_nested(self): c = self.db.test c.drop() - c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4] + c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] self.assertEqual( [2], - [i['i'] for i in c.find({ - '$and': [ + [ + i["i"] + for i in c.find( { - # This clause gives us [1,2,4] - '$or': [ - {'i': {'$lte': 2}}, - {'i': {'$gt': 3}}, - ], - }, - { - # This clause gives us [2,3] - '$or': [ - {'i': 2}, - {'i': 3}, + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, ] - }, - ] - })] + } + ) + ], ) self.assertEqual( [0, 1, 2], - [i['i'] for i in c.find({ - '$or': [ - { - # This clause gives us [2] - '$and': [ - {'i': {'$gte': 2}}, - {'i': {'$lt': 3}}, - ], - }, + [ + i["i"] + for i in c.find( { - # This clause gives us [0,1] - '$and': [ - {'i': {'$gt': -100}}, - {'i': {'$lt': 2}}, + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, ] - }, - ] - })] + } + ) + ], ) def test_find_regex(self): c = self.db.test c.drop() - c.insert_one({'r': re.compile('.*')}) + c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance(c.find_one()['r'], Regex)) + self.assertTrue(isinstance(c.find_one()["r"], Regex)) # type: ignore for doc in c.find(): - self.assertTrue(isinstance(doc['r'], Regex)) + self.assertTrue(isinstance(doc["r"], Regex)) def test_find_command_generation(self): - cmd = _gen_find_command('coll', {'$query': {'foo': 1}, '$dumb': 2}, - None, 0, 0, 0, None, DEFAULT_READ_CONCERN, - None, None) + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) self.assertEqual( - cmd.to_dict(), - SON([('find', 'coll'), - ('$dumb', 2), - ('filter', {'foo': 1})]).to_dict()) + cmd.to_dict(), SON([("find", "coll"), ("$dumb", 
2), ("filter", {"foo": 1})]).to_dict() + ) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Collection(self.db, "test")) + + @client_context.require_version_min(5, 0, 0) + def test_helpers_with_let(self): + c = self.db.test + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (c.find, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([],)), + ] + for let in [10, "str", [], False]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): + helper(*args, let=let) # type: ignore + for helper, args in helpers: + helper(*args, let={}) # type: ignore if __name__ == "__main__": diff --git a/test/test_collection_management.py b/test/test_collection_management.py new file mode 100644 index 0000000000..0eacde1302 --- /dev/null +++ b/test/test_collection_management.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection management unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "collection_management") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_monotonic.py b/test/test_command_monitoring.py similarity index 52% rename from test/test_monotonic.py rename to test/test_command_monitoring.py index 411a25abcf..d2f578824d 100644 --- a/test/test_monotonic.py +++ b/test/test_command_monitoring.py @@ -1,4 +1,4 @@ -# Copyright 2018-present MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,29 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test the monotonic module.""" +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations +import os import sys sys.path[0:0] = [""] -from pymongo.monotonic import time as pymongo_time - from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. 
+_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") -class TestMonotonic(unittest.TestCase): - def test_monotonic_time(self): - try: - from monotonic import monotonic - self.assertIs(monotonic, pymongo_time) - except ImportError: - if sys.version_info[:2] >= (3, 3): - from time import monotonic - self.assertIs(monotonic, pymongo_time) - else: - from time import time - self.assertIs(time, pymongo_time) +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) if __name__ == "__main__": diff --git a/test/test_command_monitoring_spec.py b/test/test_command_monitoring_spec.py deleted file mode 100644 index 3d41d4b487..0000000000 --- a/test/test_command_monitoring_spec.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run the command monitoring spec tests.""" - -import os -import re -import sys - -sys.path[0:0] = [""] - -import pymongo - -from bson import json_util -from pymongo.errors import OperationFailure -from pymongo.write_concern import WriteConcern -from test import unittest, client_context -from test.utils import single_client, wait_until, EventListener, parse_read_preference - -# Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'command_monitoring') - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. 
- snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() - - -class TestAllScenarios(unittest.TestCase): - - @classmethod - @client_context.require_connection - def setUpClass(cls): - cls.listener = EventListener() - cls.client = single_client(event_listeners=[cls.listener]) - - def tearDown(self): - self.listener.results.clear() - - -def format_actual_results(results): - started = results['started'] - succeeded = results['succeeded'] - failed = results['failed'] - msg = "\nStarted: %r" % (started[0].command if len(started) else None,) - msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) - msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) - return msg - - -def create_test(scenario_def, test): - def run_scenario(self): - dbname = scenario_def['database_name'] - collname = scenario_def['collection_name'] - - coll = self.client[dbname][collname] - coll.drop() - coll.insert_many(scenario_def['data']) - self.listener.results.clear() - name = camel_to_snake(test['operation']['name']) - if 'read_preference' in test['operation']: - coll = coll.with_options(read_preference=parse_read_preference( - test['operation']['read_preference'])) - if 'collectionOptions' in test['operation']: - colloptions = test['operation']['collectionOptions'] - if 'writeConcern' in colloptions: - concern = colloptions['writeConcern'] - coll = coll.with_options( - write_concern=WriteConcern(**concern)) - - test_args = test['operation']['arguments'] - if 'options' in test_args: - options = test_args.pop('options') - test_args.update(options) - args = {} - for arg in test_args: - args[camel_to_snake(arg)] = test_args[arg] - - if name == 'bulk_write': - bulk_args = [] - for request in args['requests']: - opname = request['name'] - klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request['arguments']) - bulk_args.append(arg) - try: - coll.bulk_write(bulk_args, args.get('ordered', True)) - except OperationFailure: - pass - elif name == 'find': - if 'sort' in args: - args['sort'] = list(args['sort'].items()) - for arg in 'skip', 'limit': - if arg in args: - args[arg] = int(args[arg]) - try: - # Iterate the cursor. - tuple(coll.find(**args)) - except OperationFailure: - pass - # Wait for the killCursors thread to run if necessary. - if 'limit' in args and client_context.version[:2] < (3, 1): - self.client._kill_cursors_executor.wake() - started = self.listener.results['started'] - succeeded = self.listener.results['succeeded'] - wait_until( - lambda: started[-1].command_name == 'killCursors', - "publish a start event for killCursors.") - wait_until( - lambda: succeeded[-1].command_name == 'killCursors', - "publish a succeeded event for killCursors.") - else: - try: - getattr(coll, name)(**args) - except OperationFailure: - pass - - res = self.listener.results - for expectation in test['expectations']: - event_type = next(iter(expectation)) - if event_type == "command_started_event": - event = res['started'][0] if len(res['started']) else None - if event is not None: - # The tests substitute 42 for any number other than 0. 
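Since the camel_to_snake helper deleted above now lives in test.utils, a standalone re-derivation may help readers follow the two regex passes (the asserts are illustrative):

    import re

    def camel_to_snake(camel):
        # Pass 1: insert an underscore before an uppercase letter that starts
        # a word, keeping acronym runs intact: "HTTPResponse" -> "HTTP_Response".
        snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel)
        # Pass 2: split a lowercase letter or digit from a following uppercase.
        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower()

    assert camel_to_snake("findOneAndDelete") == "find_one_and_delete"
    assert camel_to_snake("maxTimeMS") == "max_time_ms"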
- if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = 42 - elif event.command_name == 'killCursors': - event.command['cursors'] = [42] - elif event_type == "command_succeeded_event": - event = ( - res['succeeded'].pop(0) if len(res['succeeded']) else None) - if event is not None: - reply = event.reply - # The tests substitute 42 for any number other than 0, - # and "" for any error message. - if 'writeErrors' in reply: - for doc in reply['writeErrors']: - # Remove any new fields the server adds. The tests - # only have index, code, and errmsg. - diff = set(doc) - set(['index', 'code', 'errmsg']) - for field in diff: - doc.pop(field) - doc['code'] = 42 - doc['errmsg'] = "" - elif 'cursor' in reply: - if reply['cursor']['id']: - reply['cursor']['id'] = 42 - elif event.command_name == 'killCursors': - # Make the tests continue to pass when the killCursors - # command is actually in use. - if 'cursorsKilled' in reply: - reply.pop('cursorsKilled') - reply['cursorsUnknown'] = [42] - # Found succeeded event. Pop related started event. - res['started'].pop(0) - elif event_type == "command_failed_event": - event = res['failed'].pop(0) if len(res['failed']) else None - if event is not None: - # Found failed event. Pop related started event. - res['started'].pop(0) - else: - self.fail("Unknown event type") - - if event is None: - event_name = event_type.split('_')[1] - self.fail( - "Expected %s event for %s command. Actual " - "results:%s" % ( - event_name, - expectation[event_type]['command_name'], - format_actual_results(res))) - - for attr, expected in expectation[event_type].items(): - if 'options' in expected: - options = expected.pop('options') - expected.update(options) - actual = getattr(event, attr) - if isinstance(expected, dict): - for key, val in expected.items(): - self.assertEqual(val, actual[key]) - else: - self.assertEqual(actual, expected) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): - dirname = os.path.split(dirpath)[-1] - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get('tests')), "tests cannot be empty" - # Construct test from scenario. - for test in scenario_def['tests']: - new_test = create_test(scenario_def, test) - if "ignore_if_server_version_greater_than" in test: - version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_max(*ver)( - new_test) - if "ignore_if_server_version_less_than" in test: - version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_min(*ver)( - new_test) - if "ignore_if_topology_type" in test: - types = set(test["ignore_if_topology_type"]) - if "sharded" in types: - new_test = client_context.require_no_mongos(None)( - new_test) - - test_name = 'test_%s_%s_%s' % ( - dirname, - os.path.splitext(filename)[0], - str(test['description'].replace(" ", "_"))) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - - -create_tests() - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_comment.py b/test/test_comment.py new file mode 100644 index 0000000000..baac68be58 --- /dev/null +++ b/test/test_comment.py @@ -0,0 +1,178 @@ +# Copyright 2022-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +from __future__ import annotations + +import inspect +import sys + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client + +from bson.dbref import DBRef +from pymongo.command_cursor import CommandCursor +from pymongo.operations import IndexModel + + +class Empty: + def __getattr__(self, item): + try: + self.__dict__[item] + except KeyError: + return self.empty + + def empty(self, *args, **kwargs): + return Empty() + + +class TestComment(IntegrationTest): + def _test_ops( + self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 + ): + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + listener.reset() + kwargs = {"comment": cc} + if h == coll.rename: + _ = db.get_collection("temp_temp_temp").drop() + destruct_coll = db.get_collection("test_temp") + destruct_coll.insert_one({}) + maybe_cursor = destruct_coll.rename(*args, **kwargs) + destruct_coll.drop() + elif h == db.validate_collection: + coll = db.get_collection("test") + coll.insert_one({}) + maybe_cursor = db.validate_collection(*args, **kwargs) + else: + coll.create_index("a") + maybe_cursor = h(*args, **kwargs) + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" + ) + if isinstance(maybe_cursor, CommandCursor): + maybe_cursor.close() + tested = False + # For some reason collection.list_indexes creates two commands and the first + # one doesn't contain 'comment'. 
+ for i in listener.started_events: + if cc == i.command.get("comment", ""): + self.assertEqual(cc, i.command["comment"]) + tested = True + self.assertTrue(tested) + if h not in [coll.aggregate_raw_batches]: + self.assertIn( + "`comment` (optional):", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + listener.reset() + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_database_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener]).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), + (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + self._test_ops(helpers, already_supported, listener, db=db, coll=db.get_collection("test")) + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_client_helpers(self): + listener = EventListener() + cli = rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + self._test_ops(helpers, already_supported, listener) + + @client_context.require_version_min(4, 7, -1) + def test_collection_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener])[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + self._test_ops(helpers, already_supported, listener, coll=coll, db=db) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_common.py b/test/test_common.py index 5175dd8bfd..fdd4513d04 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -13,20 +13,21 @@ # limitations under the License. 
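The mechanism _test_ops leans on above is command monitoring: the `comment` value must surface verbatim in the started command document. A self-contained hedged sketch of that round trip (the listener class and database names here are illustrative, not the suite's own EventListener; non-string comments need a recent server, which is why the tests gate on 4.7+):

    from pymongo import MongoClient, monitoring

    class CommentListener(monitoring.CommandListener):
        # Collect the comment attached to each started command.
        def __init__(self):
            self.comments = []

        def started(self, event):
            if "comment" in event.command:
                self.comments.append(event.command["comment"])

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass

    listener = CommentListener()
    client = MongoClient(event_listeners=[listener])  # assumes a local server
    client.db.coll.find_one({}, comment={"ticket": "example"})
    assert {"ticket": "example"} in listener.comments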
"""Test the pymongo common module.""" +from __future__ import annotations import sys import uuid sys.path[0:0] = [""] -from bson.binary import UUIDLegacy, PYTHON_LEGACY, STANDARD -from bson.code import Code +from test import IntegrationTest, client_context, unittest +from test.utils import connected, rs_or_single_client, single_client + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import client_context, IntegrationTest -from test.utils import connected, rs_or_single_client, single_client @client_context.require_connection @@ -35,128 +36,90 @@ def setUpModule(): class TestCommon(IntegrationTest): - def test_uuid_representation(self): coll = self.db.uuid coll.drop() # Test property - self.assertEqual(PYTHON_LEGACY, - coll.codec_options.uuid_representation) + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) # Test basic query uu = uuid.uuid4() # Insert as binary subtype 3 - coll.insert_one({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options + coll.insert_one({"uu": uu}) + self.assertEqual(uu, coll.find_one({"uu": uu})["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) - self.assertEqual(None, coll.find_one({'uu': uu})) - self.assertEqual(uu, coll.find_one({'uu': UUIDLegacy(uu)})['uu']) + self.assertEqual(None, coll.find_one({"uu": uu})) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uul, coll.find_one({"uu": uul})["uu"]) # type: ignore # Test count_documents - self.assertEqual(0, coll.count_documents({'uu': uu})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.count_documents({'uu': uu})) + self.assertEqual(0, coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.count_documents({"uu": uu})) # Test delete - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.delete_one({"uu": uu}) self.assertEqual(1, coll.count_documents({})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll.delete_one({"uu": uu}) self.assertEqual(0, coll.count_documents({})) # Test update_one - coll.insert_one({'_id': uu, 'i': 1}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) + coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + 
coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.find_one({"_id": uu})["i"]) # type: ignore + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, coll.find_one({"_id": uu})["i"]) # type: ignore # Test Cursor.distinct - self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - self.assertEqual([], coll.find({'_id': uu}).distinct('i')) + self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], coll.find({"_id": uu}).distinct("i")) # Test findAndModify - self.assertEqual(None, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(2, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) + self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})["i"]) + self.assertEqual(5, coll.find_one({"_id": uu})["i"]) # type: ignore # Test command - self.assertEqual(5, self.db.command('findAndModify', 'uuid', - update={'$set': {'i': 6}}, - query={'_id': uu})['value']['i']) - self.assertEqual(6, self.db.command( - 'findAndModify', 'uuid', - update={'$set': {'i': 7}}, - query={'_id': UUIDLegacy(uu)})['value']['i']) - - # Test (inline)_map_reduce - coll.drop() - coll.insert_one({"_id": uu, "x": 1, "tags": ["dog", "cat"]}) - coll.insert_one({"_id": uuid.uuid4(), "x": 3, - "tags": ["mouse", "cat", "dog"]}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - q = {"_id": uu} - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual([], result) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(0, self.db.results.count_documents({})) - - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - q = {"_id": uu} - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual(2, len(result)) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(2, self.db.results.count_documents({})) - - self.db.drop_collection("result") - coll.drop() + self.assertEqual( + 5, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + )["value"]["i"], + ) + self.assertEqual( + 6, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + )["value"]["i"], + ) def test_write_concern(self): c = rs_or_single_client(connect=False) self.assertEqual(WriteConcern(), c.write_concern) - c = rs_or_single_client(connect=False, w=2, wtimeout=1000) + c = rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) wc = WriteConcern(w=2, wtimeout=1000) self.assertEqual(wc, c.write_concern) # 
Can we override back to the server default? - db = c.get_database('pymongo_test', write_concern=WriteConcern()) + db = c.get_database("pymongo_test", write_concern=WriteConcern()) self.assertEqual(db.write_concern, WriteConcern()) db = c.pymongo_test @@ -165,7 +128,7 @@ def test_write_concern(self): self.assertEqual(wc, coll.write_concern) cwc = WriteConcern(j=True) - coll = db.get_collection('test', write_concern=cwc) + coll = db.get_collection("test", write_concern=cwc) self.assertEqual(cwc, coll.write_concern) self.assertEqual(wc, db.write_concern) @@ -186,24 +149,30 @@ def test_mongo_client(self): self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client(f"mongodb://{pair}/", replicaSet=client_context.replica_set_name) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/?w=0" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test coll.insert_one(doc) # Equality tests direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), - **self.credentials)) + direct2 = connected(single_client(f"mongodb://{pair}/?w=0", **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) + def test_validate_boolean(self): + self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + if __name__ == "__main__": unittest.main() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 4a63e0e239..ef8500ae6a 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -13,43 +13,52 @@ # limitations under the License. """Test compliance with the connections survive primary step down spec.""" +from __future__ import annotations import sys sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + CMAPListener, + ensure_all_connected, + repl_set_step_down, + rs_or_single_client, +) + from bson import SON from pymongo import monitoring -from pymongo.errors import NotMasterError +from pymongo.collection import Collection +from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (CMAPListener, - ensure_all_connected, - rs_or_single_client) - class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): + listener: CMAPListener + coll: Collection + @classmethod @client_context.require_replica_set def setUpClass(cls): - super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() + super().setUpClass() cls.listener = CMAPListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener], - retryWrites=False) + cls.client = rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) # Ensure connections to all servers in replica set. 
This is to test - # that the is_writable flag is properly updated for sockets that + # that the is_writable flag is properly updated for connections that # survive a replica set election. ensure_all_connected(cls.client) cls.listener.reset() - cls.db = cls.client.get_database( - "step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection( - "step-down", write_concern=WriteConcern("majority")) + cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) + cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) + + @classmethod + def tearDownClass(cls): + cls.client.close() def setUp(self): # Note that all ops use same write-concern as self.db (majority). @@ -63,36 +72,32 @@ def set_fail_point(self, command_args): self.client.admin.command(cmd) def verify_pool_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 1) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) def verify_pool_not_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 0) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) @client_context.require_version_min(4, 2, -1) def test_get_more_iteration(self): # Insert 5 documents with WC majority. - self.coll.insert_many([{'data': k} for k in range(5)]) + self.coll.insert_many([{"data": k} for k in range(5)]) # Start a find operation and retrieve first batch of results. batch_size = 2 cursor = self.coll.find(batch_size=batch_size) for _ in range(batch_size): cursor.next() # Force step-down the primary. - res = self.client.admin.command( - SON([("replSetStepDown", 5), ("force", True)])) - self.assertEqual(res["ok"], 1.0) + repl_set_step_down(self.client, replSetStepDown=5, force=True) # Get next batch of results. for _ in range(batch_size): cursor.next() # Verify pool not cleared. self.verify_pool_not_cleared() # Attempt insertion to mark server description as stale and prevent a - # notMaster error on the subsequent operation. + # NotPrimaryError on the subsequent operation. try: self.coll.insert_one({}) - except NotMasterError: + except NotPrimaryError: pass # Next insert should succeed on the new primary without clearing pool. self.coll.insert_one({}) @@ -100,14 +105,14 @@ def test_get_more_iteration(self): def run_scenario(self, error_code, retry, pool_status_checker): # Set fail point. - self.set_fail_point({"mode": {"times": 1}, - "data": {"failCommands": ["insert"], - "errorCode": error_code}}) + self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) self.addCleanup(self.set_fail_point, {"mode": "off"}) # Insert record and verify failure. - with self.assertRaises(NotMasterError) as exc: + with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details['code'], error_code) + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore[call-overload] # Retry before CMAPListener assertion if retry_before=True. 
if retry: self.coll.insert_one({"test": 1}) @@ -118,13 +123,13 @@ def run_scenario(self, error_code, retry, pool_status_checker): @client_context.require_version_min(4, 2, -1) @client_context.require_test_commands - def test_not_master_keep_connection_pool(self): + def test_not_primary_keep_connection_pool(self): self.run_scenario(10107, True, self.verify_pool_not_cleared) @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 1, 0, -1) @client_context.require_test_commands - def test_not_master_reset_connection_pool(self): + def test_not_primary_reset_connection_pool(self): self.run_scenario(10107, False, self.verify_pool_cleared) @client_context.require_version_min(4, 0, 0) diff --git a/test/test_create_entities.py b/test/test_create_entities.py new file mode 100644 index 0000000000..b7965d4a1d --- /dev/null +++ b/test/test_create_entities.py @@ -0,0 +1,126 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest +from test.unified_format import UnifiedSpecTestMixinV1 + + +class TestCreateEntities(IntegrationTest): + def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], + } + }, + ], + "tests": [{"description": "foo", "operations": []}], + } + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertIn("PoolCreatedEvent", event["name"]) + + def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": {"retryReads": True}, + } + }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat", + } + }, + ], + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 2, "x": 44}}, + 
}, + ], + }, + } + ], + } + ], + } + + self.client.dat.dat.delete_many({}) + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py new file mode 100644 index 0000000000..92a60a47fc --- /dev/null +++ b/test/test_crud_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 48df16df90..c9f8dbe4b4 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -13,50 +13,53 @@ # limitations under the License. """Test the collection module.""" +from __future__ import annotations -import json import os -import re import sys sys.path[0:0] = [""] -from bson.py3compat import iteritems -from pymongo import operations, WriteConcern +from test import IntegrationTest, unittest +from test.utils import ( + SpecTestCreator, + camel_to_snake, + camel_to_snake_args, + camel_to_upper_camel, + drop_collections, +) + +from pymongo import WriteConcern, operations from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor from pymongo.errors import PyMongoError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_concern import ReadConcern -from pymongo.results import _WriteResult, BulkWriteResult -from pymongo.operations import (InsertOne, - DeleteOne, - DeleteMany, - ReplaceOne, - UpdateOne, - UpdateMany) - -from test import unittest, client_context, IntegrationTest -from test.utils import (camel_to_snake, camel_to_upper_camel, - camel_to_snake_args, drop_collections, TestCreator) +from pymongo.results import BulkWriteResult, _WriteResult # Location of JSON test specifications. 
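One way to read the counts asserted in test_store_all_others_as_entities above: the first loop iteration inserts _id 1 and _id 2 into an empty collection (two successes); each of the remaining four iterations hits a DuplicateKeyError on its first insert, which is recorded as an error and ends that iteration. A toy model of the bookkeeping (hypothetical counters, not the unified runner's real implementation):

    iterations = successes = errors = 0
    for i in range(5):  # numIterations: 5
        iterations += 1
        try:
            if i > 0:  # _id 1 already exists after the first iteration
                raise KeyError("DuplicateKeyError")
            successes += 2  # both insertOne operations succeed
        except KeyError:
            errors += 1  # error recorded; the rest of that iteration is skipped
    assert (iterations, successes, errors) == (5, 2, 4)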
-_TEST_PATH = os.path.join( -    os.path.dirname(os.path.realpath(__file__)), 'crud', 'v1') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "v1") class TestAllScenarios(IntegrationTest): -    pass +    RUN_ON_SERVERLESS = True def check_result(self, expected_result, result): if isinstance(result, _WriteResult): for res in expected_result: prop = camel_to_snake(res) -            msg = "%s : %r != %r" % (prop, expected_result, result) +            msg = f"{prop} : {expected_result!r} != {result!r}" # SPEC-869: Only BulkWriteResult has upserted_count. -            if (prop == "upserted_count" -                    and not isinstance(result, BulkWriteResult)): -                if result.upserted_id is not None: +            if prop == "upserted_count" and not isinstance(result, BulkWriteResult): +                if result.upserted_id is not None:  # type: ignore upserted_count = 1 else: upserted_count = 0 @@ -64,25 +67,23 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): -                    self.assertEqual(len(expected_result[res]), -                                     result.inserted_count) +                    self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. ids = expected_result[res] if isinstance(ids, dict): ids = [ids[str(i)] for i in range(len(ids))] -                    self.assertEqual(ids, result.inserted_ids, msg) +                    self.assertEqual(ids, result.inserted_ids, msg)  # type: ignore elif prop == "upserted_ids": # Convert indexes from strings to integers. ids = expected_result[res] expected_ids = {} for str_index in ids: expected_ids[int(str_index)] = ids[str_index] -                self.assertEqual(expected_ids, result.upserted_ids, msg) +                self.assertEqual(expected_ids, result.upserted_ids, msg)  # type: ignore else: -                self.assertEqual( -                    getattr(result, prop), expected_result[res], msg) +                self.assertEqual(getattr(result, prop), expected_result[res], msg) else: self.assertEqual(result, expected_result) @@ -90,14 +91,16 @@ def check_result(self, expected_result, result): def run_operation(collection, test): # Convert command from CamelCase to pymongo.collection method. -    operation = camel_to_snake(test['operation']['name']) +    operation = camel_to_snake(test["operation"]["name"]) cmd = getattr(collection, operation) # Convert arguments to snake_case and handle special cases. -    arguments = test['operation']['arguments'] +    arguments = test["operation"]["arguments"] options = arguments.pop("options", {}) for option_name in options: arguments[camel_to_snake(option_name)] = options[option_name] +    if operation == "count": +        raise unittest.SkipTest("PyMongo does not support count") if operation == "bulk_write": # Parse each request into a bulk write model. requests = [] @@ -113,7 +116,7 @@ def run_operation(collection, test): # PyMongo accepts sort as list of tuples. if arg_name == "sort": sort_dict = arguments[arg_name] -            arguments[arg_name] = list(iteritems(sort_dict)) +            arguments[arg_name] = list(sort_dict.items()) # Named "key" instead of "fieldName". 
if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) @@ -128,11 +131,6 @@ def run_operation(collection, test): result = cmd(**arguments) - if operation == "aggregate": - if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: - out = collection.database[arguments["pipeline"][-1]["$out"]] - result = out.find() - if isinstance(result, Cursor) or isinstance(result, CommandCursor): return list(result) @@ -143,90 +141,135 @@ def create_test(scenario_def, test, name): def run_scenario(self): # Cleanup state and load data (if provided). drop_collections(self.db) - data = scenario_def.get('data') + data = scenario_def.get("data") if data: - self.db.test.with_options( - write_concern=WriteConcern(w="majority")).insert_many( - scenario_def['data']) + self.db.test.with_options(write_concern=WriteConcern(w="majority")).insert_many( + scenario_def["data"] + ) # Run operations and check results or errors. - expected_result = test.get('outcome', {}).get('result') - expected_error = test.get('outcome', {}).get('error') + expected_result = test.get("outcome", {}).get("result") + expected_error = test.get("outcome", {}).get("error") if expected_error is True: with self.assertRaises(PyMongoError): run_operation(self.db.test, test) else: result = run_operation(self.db.test, test) - check_result(self, expected_result, result) + if expected_result is not None: + check_result(self, expected_result, result) # Assert final state is expected. - expected_c = test['outcome'].get('collection') + expected_c = test["outcome"].get("collection") if expected_c is not None: - expected_name = expected_c.get('name') + expected_name = expected_c.get("name") if expected_name is not None: db_coll = self.db[expected_name] else: db_coll = self.db.test - db_coll = db_coll.with_options( - read_concern=ReadConcern(level="local")) - self.assertEqual(list(db_coll.find()), expected_c['data']) + db_coll = db_coll.with_options(read_concern=ReadConcern(level="local")) + self.assertEqual(list(db_coll.find()), expected_c["data"]) return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() class TestWriteOpsComparison(unittest.TestCase): def test_InsertOneEquals(self): - self.assertEqual(InsertOne({'foo': 42}), InsertOne({'foo': 42})) + self.assertEqual(InsertOne({"foo": 42}), InsertOne({"foo": 42})) def test_InsertOneNotEquals(self): - self.assertNotEqual(InsertOne({'foo': 42}), InsertOne({'foo': 23})) + self.assertNotEqual(InsertOne({"foo": 42}), InsertOne({"foo": 23})) def test_DeleteOneEquals(self): - self.assertEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 42})) + self.assertEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 42})) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_US"}) + ) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) def test_DeleteOneNotEquals(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 23})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 23})) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_GB"}) + ) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) def test_DeleteManyEquals(self): - 
self.assertEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 42})) + self.assertEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 42})) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_US"}), + ) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) def test_DeleteManyNotEquals(self): - self.assertNotEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 23})) + self.assertNotEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 23})) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_GB"}), + ) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) def test_DeleteOneNotEqualsDeleteMany(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteMany({'foo': 42})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteMany({"foo": 42})) def test_ReplaceOneEquals(self): - self.assertEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False)) + self.assertEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ) def test_ReplaceOneNotEquals(self): - self.assertNotEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=True)) + self.assertNotEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=True), + ) def test_UpdateOneEquals(self): - self.assertEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateOneNotEquals(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateManyEquals(self): - self.assertEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateManyNotEquals(self): - self.assertNotEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateOneNotEqualsUpdateMany(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) + if __name__ == "__main__": unittest.main() diff --git a/test/test_csot.py b/test/test_csot.py new file mode 100644 index 0000000000..e8ee92d4a6 --- /dev/null +++ b/test/test_csot.py @@ -0,0 +1,107 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes + +import pymongo +from pymongo import _csot +from pymongo.errors import PyMongoError + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestCSOT(IntegrationTest): + RUN_ON_SERVERLESS = True + RUN_ON_LOAD_BALANCER = True + + def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + # Capped at the original 10 deadline. + with pymongo.timeout(15): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertEqual(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + with pymongo.timeout(5): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + @client_context.require_change_streams + def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + with coll.watch() as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if client_context.version < (4, 0): + stream.try_next() + coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. + with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_cursor.py b/test/test_cursor.py index ca19ebb28b..7b10c2cac6 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -13,56 +13,57 @@ # limitations under the License. 
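The invariant that test_timeout_nested above pins down is that a nested pymongo.timeout may shorten but never extend the operation budget. A server-free sketch of the same checks (_csot is the private module the test itself imports):

    import pymongo
    from pymongo import _csot

    with pymongo.timeout(10):
        outer_deadline = _csot.get_deadline()
        with pymongo.timeout(15):
            # The inner block advertises 15s, but its deadline stays capped
            # at the outer block's deadline.
            assert _csot.get_timeout() == 15
            assert _csot.get_deadline() == outer_deadline
        # Leaving the inner block restores the outer settings.
        assert _csot.get_timeout() == 10
        assert _csot.get_deadline() == outer_deadline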
"""Test the cursor module.""" +from __future__ import annotations + import copy import gc import itertools import random import re import sys -import time import threading -import warnings +import time + +import pymongo sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + ignore_deprecations, + rs_or_single_client, + wait_until, +) + from bson import decode_all from bson.code import Code -from bson.py3compat import PY3 from bson.son import SON -from pymongo import (ASCENDING, - DESCENDING, - ALL, - OFF) +from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType -from pymongo.errors import (ConfigurationError, - ExecutionTimeout, - InvalidOperation, - OperationFailure) +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (EventListener, - ignore_deprecations, - rs_or_single_client, - WhiteListEventListener) - -if PY3: - long = int +from pymongo.write_concern import WriteConcern class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): - cursor = self.db.test.find({ - "x": re.compile("^hmmm.*"), - "y": [re.compile("^hmm.*")], - "z": {"a": [re.compile("^hm.*")]}, - re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": [re.compile("^hm.*")]}, + } + ) cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) + self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore def test_add_remove_option(self): cursor = self.db.test.find() @@ -70,19 +71,15 @@ def test_add_remove_option(self): cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(128) - cursor2 = self.db.test.find( - cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) self.assertEqual(162, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(162, cursor._Cursor__query_flags) cursor.add_option(128) @@ -91,13 +88,11 @@ def test_add_remove_option(self): cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) 
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(2, cursor._Cursor__query_flags) cursor.remove_option(32) @@ -107,8 +102,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(no_cursor_timeout=True) self.assertEqual(16, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(16) self.assertEqual(0, cursor._Cursor__query_flags) @@ -116,8 +110,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) self.assertEqual(2, cursor._Cursor__query_flags) @@ -125,8 +118,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(allow_partial_results=True) self.assertEqual(128, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(128) self.assertEqual(0, cursor._Cursor__query_flags) @@ -139,44 +131,52 @@ def test_add_remove_option_exhaust(self): cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertEqual(64, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(64) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertTrue(cursor._Cursor__exhaust) cursor.remove_option(64) self.assertEqual(0, cursor._Cursor__query_flags) self.assertFalse(cursor._Cursor__exhaust) + def test_allow_disk_use(self): + db = self.db + db.pymongo_test.drop() + coll = db.pymongo_test + + self.assertRaises(TypeError, coll.find().allow_disk_use, "baz") + + cursor = coll.find().allow_disk_use(True) + self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore + cursor = coll.find().allow_disk_use(False) + self.assertEqual(False, cursor._Cursor__allow_disk_use) # type: ignore + def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().max_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) coll.find().max_time_ms(None) - coll.find().max_time_ms(long(1)) + coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) + self.assertEqual(999, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) + self.assertEqual(1000, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) - self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) + 
self.assertEqual(999, c2._Cursor__max_time_ms) # type: ignore + self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) # type: ignore + self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) # type: ignore self.assertTrue(coll.find_one(max_time_ms=1000)) client = self.client - if (not client_context.is_mongos - and client_context.test_commands_enabled): + if not client_context.is_mongos and client_context.test_commands_enabled: # Cursor parses server timeout error in response to initial query. - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: cursor = coll.find().max_time_ms(1) try: @@ -185,25 +185,21 @@ def test_max_time_ms(self): pass else: self.fail("ExecutionTimeout not raised") - self.assertRaises(ExecutionTimeout, - coll.find_one, max_time_ms=1) + self.assertRaises(ExecutionTimeout, coll.find_one, max_time_ms=1) finally: - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") - @client_context.require_version_min(3, 1, 9, -1) def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.create_collection("pymongo_test", capped=True, size=4096) - self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_await_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) coll.find().max_await_time_ms(None) - coll.find().max_await_time_ms(long(1)) + coll.find().max_await_time_ms(1) # When cursor is not tailable_await cursor = coll.find() @@ -216,95 +212,90 @@ def test_max_await_time_ms(self): self.assertEqual(None, cursor._Cursor__max_await_time_ms) # If cursor is tailable_await and timeout is set - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) self.assertEqual(99, cursor._Cursor__max_await_time_ms) - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms( - 10).max_await_time_ms(90) + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) self.assertEqual(90, cursor._Cursor__max_await_time_ms) - listener = WhiteListEventListener('find', 'getMore') - coll = rs_or_single_client( - event_listeners=[listener])[self.db.name].pymongo_test - results = listener.results + listener = AllowListEventListener("find", "getMore") + coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test # Tailable_await defaults. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertFalse('maxTimeMS' in results['started'][1].command) - results.clear() + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with max_await_time_ms set. 
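The listener-based cases that follow pin down where each timeout travels: max_time_ms becomes maxTimeMS on the initial find command, while max_await_time_ms becomes maxTimeMS on the getMore command, and only for TAILABLE_AWAIT cursors; on ordinary cursors it is silently ignored. Roughly:

    cursor = (coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
              .max_time_ms(99)         # -> {"find": ..., "maxTimeMS": 99}
              .max_await_time_ms(99))  # -> {"getMore": ..., "maxTimeMS": 99}
    # coll.find(batch_size=1).max_await_time_ms(99) sends no maxTimeMS at all.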
- list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Tailable_await with max_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with both max_time_ms and max_await_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms( - 99).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" 
in listener.started_events[1].command) + listener.reset() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -316,9 +307,7 @@ def test_max_time_ms_getmore(self): # Send initial query before turning on failpoint. next(cursor) - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: try: # Iterate up to first getmore. @@ -328,9 +317,7 @@ def test_max_time_ms_getmore(self): else: self.fail("ExecutionTimeout not raised") finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_explain(self): a = self.db.test.find() @@ -338,18 +325,16 @@ def test_explain(self): for _ in a: break b = a.explain() - # "cursor" pre MongoDB 2.7.6, "executionStats" post - self.assertTrue("cursor" in b or "executionStats" in b) + self.assertIn("executionStats", b) def test_explain_with_read_concern(self): # Do not add readConcern level to explain. 
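The rewritten test below still checks the same driver rule: explain runs as its own command, and the collection's configured readConcern is intentionally not attached to it. A sketch of the behavior under test, using names already imported in this file:

    coll = client.pymongo_test.test.with_options(
        read_concern=ReadConcern(level="local"))
    coll.find().explain()
    # The listener sees a single "explain" command with no top-level
    # readConcern, despite the read concern set on the collection.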
- listener = WhiteListEventListener("explain") + listener = AllowListEventListener("explain") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - coll = client.pymongo_test.test.with_options( - read_concern=ReadConcern(level="local")) + coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results['started'] + started = listener.started_events self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) @@ -360,23 +345,26 @@ def test_hint(self): db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("num", ASCENDING)]).explain) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain, + ) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) spec = [("num", DESCENDING)] - index = db.test.create_index(spec) + _ = db.test.create_index(spec) first = next(db.test.find()) - self.assertEqual(0, first.get('num')) + self.assertEqual(0, first.get("num")) first = next(db.test.find().hint(spec)) - self.assertEqual(99, first.get('num')) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertEqual(99, first.get("num")) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) a = db.test.find({"num": 17}) a.hint(spec) @@ -384,17 +372,32 @@ def test_hint(self): break self.assertRaises(InvalidOperation, a.hint, spec) + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + def test_hint_by_name(self): db = self.db db.test.drop() db.test.insert_many([{"i": i} for i in range(100)]) - db.test.create_index([('i', DESCENDING)], name='fooindex') + db.test.create_index([("i", DESCENDING)], name="fooindex") first = next(db.test.find()) - self.assertEqual(0, first.get('i')) - first = next(db.test.find().hint('fooindex')) - self.assertEqual(99, first.get('i')) + self.assertEqual(0, first.get("i")) + first = next(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) def test_limit(self): db = self.db @@ -402,7 +405,7 @@ def test_limit(self): self.assertRaises(TypeError, db.test.find().limit, None) self.assertRaises(TypeError, db.test.find().limit, "hello") self.assertRaises(TypeError, db.test.find().limit, 5.5) - self.assertTrue(db.test.find().limit(long(5))) + self.assertTrue(db.test.find().limit(5)) db.test.drop() db.test.insert_many([{"x": i} for i in range(100)]) @@ -443,7 +446,6 @@ def test_limit(self): break self.assertRaises(InvalidOperation, a.limit, 5) - @ignore_deprecations # Ignore max without hint. 
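The change to the find() helper in test_max/test_min below reflects a behavior change rather than a cleanup: min()/max() now always require an accompanying hint(), and PyMongo raises InvalidOperation client-side when it is missing (see the rewritten test_min_max_without_hint in this same hunk). The required pairing, in short:

    cursor = db.test.find().max([("j", 3)]).hint([("j", ASCENDING)])  # OK
    list(db.test.find().max([("j", 3)]))  # raises InvalidOperation: no hint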
def test_max(self): db = self.db db.test.drop() @@ -453,10 +455,7 @@ def test_max(self): db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(max_spec, expected_index): - cursor = db.test.find().max(max_spec) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint(expected_index) - return cursor + return db.test.find().max(max_spec).hint(expected_index) cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 3) @@ -482,7 +481,6 @@ def find(max_spec, expected_index): self.assertRaises(TypeError, db.test.find().max, 10) self.assertRaises(TypeError, db.test.find().max, {"j": 10}) - @ignore_deprecations # Ignore min without hint. def test_min(self): db = self.db db.test.drop() @@ -492,10 +490,7 @@ def test_min(self): db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(min_spec, expected_index): - cursor = db.test.find().min(min_spec) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint(expected_index) - return cursor + return db.test.find().min(min_spec).hint(expected_index) cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 7) @@ -521,23 +516,15 @@ def find(min_spec, expected_index): self.assertRaises(TypeError, db.test.find().min, 10) self.assertRaises(TypeError, db.test.find().min, {"j": 10}) - @client_context.require_version_max(4, 1, -1) def test_min_max_without_hint(self): coll = self.db.test j_index = [("j", ASCENDING)] coll.create_index(j_index) - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("default", DeprecationWarning) - list(coll.find().min([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) - # Ensure the warning is raised with the proper stack level. - del warns[:] + with self.assertRaises(InvalidOperation): list(coll.find().min([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) - del warns[:] + with self.assertRaises(InvalidOperation): list(coll.find().max([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) def test_batch_size(self): db = self.db @@ -548,7 +535,7 @@ def test_batch_size(self): self.assertRaises(TypeError, db.test.find().batch_size, "hello") self.assertRaises(TypeError, db.test.find().batch_size, 5.5) self.assertRaises(ValueError, db.test.find().batch_size, -1) - self.assertTrue(db.test.find().batch_size(long(5))) + self.assertTrue(db.test.find().batch_size(5)) a = db.test.find() for _ in a: break @@ -583,12 +570,8 @@ def cursor_count(cursor, expected_count): cur = db.test.find().batch_size(1) next(cur) - if client_context.version.at_least(3, 1, 9): - # find command batchSize should be 1 - self.assertEqual(0, len(cur._Cursor__data)) - else: - # OP_QUERY ntoreturn should be 2 - self.assertEqual(1, len(cur._Cursor__data)) + # find command batchSize should be 1 + self.assertEqual(0, len(cur._Cursor__data)) next(cur) self.assertEqual(0, len(cur._Cursor__data)) next(cur) @@ -672,7 +655,7 @@ def test_skip(self): self.assertRaises(TypeError, db.test.find().skip, "hello") self.assertRaises(TypeError, db.test.find().skip, 5.5) self.assertRaises(ValueError, db.test.find().skip, -5) - self.assertTrue(db.test.find().skip(long(5))) + self.assertTrue(db.test.find().skip(5)) db.drop_collection("test") @@ -717,8 +700,7 @@ def test_sort(self): self.assertRaises(TypeError, db.test.find().sort, 5) self.assertRaises(ValueError, db.test.find().sort, []) self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) - self.assertRaises(TypeError, 
db.test.find().sort, - [("hello", DESCENDING)], DESCENDING) + self.assertRaises(TypeError, db.test.find().sort, [("hello", DESCENDING)], DESCENDING) db.test.drop() @@ -739,8 +721,7 @@ def test_sort(self): self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])] self.assertEqual(desc, expect) - desc = [i["x"] for i in - db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + desc = [i["x"] for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] self.assertEqual(desc, expect) expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] @@ -751,9 +732,11 @@ def test_sort(self): for (a, b) in shuffled: db.test.insert_one({"a": a, "b": b}) - result = [(i["a"], i["b"]) for i in - db.test.find().sort([("b", DESCENDING), - ("a", ASCENDING)])] + result = [ + (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] + self.assertEqual(result, expected) + result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), "a"])] self.assertEqual(result, expected) a = db.test.find() @@ -762,62 +745,6 @@ def test_sort(self): break self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) - @ignore_deprecations - def test_count(self): - db = self.db - db.test.drop() - - self.assertEqual(0, db.test.find().count()) - - db.test.insert_many([{"x": i} for i in range(10)]) - - self.assertEqual(10, db.test.find().count()) - self.assertTrue(isinstance(db.test.find().count(), int)) - self.assertEqual(10, db.test.find().limit(5).count()) - self.assertEqual(10, db.test.find().skip(5).count()) - - self.assertEqual(1, db.test.find({"x": 1}).count()) - self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count()) - - a = db.test.find() - b = a.count() - for _ in a: - break - self.assertEqual(b, a.count()) - - self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count()) - - @ignore_deprecations - def test_count_with_hint(self): - collection = self.db.test - collection.drop() - - collection.insert_many([{'i': 1}, {'i': 2}]) - self.assertEqual(2, collection.find().count()) - - collection.create_index([('i', 1)]) - - self.assertEqual(1, collection.find({'i': 1}).hint("_id_").count()) - self.assertEqual(2, collection.find().hint("_id_").count()) - - self.assertRaises(OperationFailure, - collection.find({'i': 1}).hint("BAD HINT").count) - - # Create a sparse index which should have no entries. 
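test_count and test_count_with_hint are deleted outright because Cursor.count was removed in PyMongo 4.0; the replacements live on Collection. A hedged migration sketch:

    n = db.test.count_documents({"x": {"$lt": 5}})      # was find({...}).count()
    n = db.test.count_documents({}, skip=5, limit=10)   # skip/limit are kwargs
    n = db.test.count_documents({"i": 1}, hint="_id_")  # hints still supported
    approx = db.test.estimated_document_count()         # fast metadata-based total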
- collection.create_index([('x', 1)], sparse=True) - - self.assertEqual(0, collection.find({'i': 1}).hint("x_1").count()) - self.assertEqual( - 0, collection.find({'i': 1}).hint([("x", 1)]).count()) - - if client_context.version.at_least(3, 3, 2): - self.assertEqual(0, collection.find().hint("x_1").count()) - self.assertEqual(0, collection.find().hint([("x", 1)]).count()) - else: - self.assertEqual(2, collection.find().hint("x_1").count()) - self.assertEqual(2, collection.find().hint([("x", 1)]).count()) - - @ignore_deprecations def test_where(self): db = self.db db.test.drop() @@ -829,34 +756,34 @@ def test_where(self): db.test.insert_many([{"x": i} for i in range(10)]) - self.assertEqual(3, len(list(db.test.find().where('this.x < 3')))) - self.assertEqual(3, - len(list(db.test.find().where(Code('this.x < 3'))))) - self.assertEqual(3, len(list(db.test.find().where(Code('this.x < i', - {"i": 3}))))) + self.assertEqual(3, len(list(db.test.find().where("this.x < 3")))) + self.assertEqual(3, len(list(db.test.find().where(Code("this.x < 3"))))) + + code_with_scope = Code("this.x < i", {"i": 3}) + if client_context.version.at_least(4, 3, 3): + # MongoDB 4.4 removed support for Code with scope. + with self.assertRaises(OperationFailure): + list(db.test.find().where(code_with_scope)) + + code_with_empty_scope = Code("this.x < 3", {}) + with self.assertRaises(OperationFailure): + list(db.test.find().where(code_with_empty_scope)) + else: + self.assertEqual(3, len(list(db.test.find().where(code_with_scope)))) + self.assertEqual(10, len(list(db.test.find()))) + self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where("this.x < 3")]) + self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where("this.x < 3")]) + self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where("this.x > 3")]) - self.assertEqual(3, db.test.find().where('this.x < 3').count()) - self.assertEqual(10, db.test.find().count()) - self.assertEqual(3, db.test.find().where(u'this.x < 3').count()) - self.assertEqual([0, 1, 2], - [a["x"] for a in - db.test.find().where('this.x < 3')]) - self.assertEqual([], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x < 3')]) - self.assertEqual([5], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x > 3')]) - - cursor = db.test.find().where('this.x < 3').where('this.x > 7') + cursor = db.test.find().where("this.x < 3").where("this.x > 7") self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() - b = a.where('this.x > 3') + _ = a.where("this.x > 3") for _ in a: break - self.assertRaises(InvalidOperation, a.where, 'this.x < 3') + self.assertRaises(InvalidOperation, a.where, "this.x < 3") def test_rewind(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -890,6 +817,8 @@ def test_rewind(self): self.assertEqual(cursor, cursor.rewind()) + # oplog_replay, and snapshot are all deprecated.
+ @ignore_deprecations def test_clone(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -927,46 +856,46 @@ def test_clone(self): self.assertNotEqual(cursor, cursor.clone()) # Just test attributes - cursor = self.db.test.find({"x": re.compile("^hello.*")}, - skip=1, - no_cursor_timeout=True, - cursor_type=CursorType.TAILABLE_AWAIT, - allow_partial_results=True, - manipulate=False, - projection={'_id': False}).limit(2) - cursor.min([('a', 1)]).max([('b', 3)]) + cursor = self.db.test.find( + {"x": re.compile("^hello.*")}, + projection={"_id": False}, + skip=1, + no_cursor_timeout=True, + cursor_type=CursorType.TAILABLE_AWAIT, + sort=[("x", 1)], + allow_partial_results=True, + oplog_replay=True, + batch_size=123, + collation={"locale": "en_US"}, + hint=[("_id", 1)], + max_scan=100, + max_time_ms=1000, + return_key=True, + show_record_id=True, + snapshot=True, + allow_disk_use=True, + ).limit(2) + cursor.min([("a", 1)]).max([("b", 3)]) cursor.add_option(128) - cursor.comment('hi!') + cursor.comment("hi!") + # Every attribute should be the same. cursor2 = cursor.clone() - self.assertEqual(cursor._Cursor__skip, cursor2._Cursor__skip) - self.assertEqual(cursor._Cursor__limit, cursor2._Cursor__limit) - self.assertEqual(type(cursor._Cursor__codec_options), - type(cursor2._Cursor__codec_options)) - self.assertEqual(cursor._Cursor__manipulate, - cursor2._Cursor__manipulate) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__comment, - cursor2._Cursor__comment) - self.assertEqual(cursor._Cursor__min, - cursor2._Cursor__min) - self.assertEqual(cursor._Cursor__max, - cursor2._Cursor__max) + self.assertEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) - cursor2._Cursor__projection['cursor2'] = False - self.assertTrue('cursor2' in cursor._Cursor__projection) + cursor2._Cursor__projection["cursor2"] = False + self.assertTrue("cursor2" in cursor._Cursor__projection) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) - cursor3._Cursor__projection['cursor3'] = False - self.assertFalse('cursor3' in cursor._Cursor__projection) + cursor3._Cursor__projection["cursor3"] = False + self.assertFalse("cursor3" in cursor._Cursor__projection) cursor4 = cursor.clone() - cursor4._Cursor__projection['cursor4'] = False - self.assertFalse('cursor4' in cursor._Cursor__projection) + cursor4._Cursor__projection["cursor4"] = False + self.assertFalse("cursor4" in cursor._Cursor__projection) # Test memo when deepcopying queries query = {"hello": "world"} @@ -975,23 +904,23 @@ def test_clone(self): cursor2 = copy.deepcopy(cursor) - self.assertNotEqual(id(cursor._Cursor__spec), - id(cursor2._Cursor__spec)) - self.assertEqual(id(cursor2._Cursor__spec['reflexive']), - id(cursor2._Cursor__spec)) + self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) + self.assertEqual(id(cursor2._Cursor__spec["reflexive"]), id(cursor2._Cursor__spec)) self.assertEqual(len(cursor2._Cursor__spec), 2) # Ensure hints are cloned as the correct type - cursor = self.db.test.find().hint([('z', 1), ("a", 1)]) + cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) - @ignore_deprecations - def test_count_with_fields(self): - self.db.test.drop() - self.db.test.insert_one({"x": 1}) - self.assertEqual(1, self.db.test.find({}, ["a"]).count()) + def 
test_clone_empty(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{"x": i} for i in range(1, 4)]) + cursor = self.db.test.find()[2:2] + cursor2 = cursor.clone() + self.assertRaises(StopIteration, cursor.next) + self.assertRaises(StopIteration, cursor2.next) def test_bad_getitem(self): self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello") @@ -1008,46 +937,38 @@ def test_getitem_slice_index(self): self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) for a, b in zip(count(0), self.db.test.find()): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(100, len(list(self.db.test.find()[0:]))) for a, b in zip(count(0), self.db.test.find()[0:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[20:]))) for a, b in zip(count(20), self.db.test.find()[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) for a, b in zip(count(99), self.db.test.find()[99:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) - for i in self.db.test.find()[1000:]: + for _i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - self.assertEqual(5, len(list( - self.db.test.find()[long(20):long(25)]))) + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) for a, b in zip(count(20), self.db.test.find()[20:25]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) for a, b in zip(count(20), self.db.test.find()[40:45][20:]): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find()[40:45].limit(0).skip(20)) - ) - ) - for a, b in zip(count(20), - self.db.test.find()[40:45].limit(0).skip(20)): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find().limit(10).skip(40)[20:])) - ) - for a, b in zip(count(20), - self.db.test.find().limit(10).skip(40)[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): + self.assertEqual(a, b["i"]) self.assertEqual(1, len(list(self.db.test.find()[:1]))) self.assertEqual(5, len(list(self.db.test.find()[:5]))) @@ -1056,10 +977,7 @@ def test_getitem_slice_index(self): self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) self.assertEqual(0, len(list(self.db.test.find()[10:10]))) self.assertEqual(0, len(list(self.db.test.find()[:0]))) - self.assertEqual(80, - len(list(self.db.test.find()[10:10].limit(0).skip(20)) - ) - ) + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) @@ -1067,41 +985,16 @@ def test_getitem_numeric_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) - self.assertEqual(0, self.db.test.find()[0]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) - self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) - self.assertEqual(50, self.db.test.find()[long(50)]['i']) - self.assertEqual(99, self.db.test.find()[99]['i']) + self.assertEqual(0, self.db.test.find()[0]["i"]) + 
self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) - self.assertRaises(IndexError, - lambda x: self.db.test.find().skip(50)[x], 50) - - @ignore_deprecations - def test_count_with_limit_and_skip(self): - self.assertRaises(TypeError, self.db.test.find().count, "foo") - - def check_len(cursor, length): - self.assertEqual(len(list(cursor)), cursor.count(True)) - self.assertEqual(length, cursor.count(True)) - - self.db.drop_collection("test") - self.db.test.insert_many([{"i": i} for i in range(100)]) - - check_len(self.db.test.find(), 100) - - check_len(self.db.test.find().limit(10), 10) - check_len(self.db.test.find().limit(110), 100) - - check_len(self.db.test.find().skip(10), 90) - check_len(self.db.test.find().skip(110), 0) - - check_len(self.db.test.find().limit(10).skip(10), 10) - check_len(self.db.test.find()[10:20], 10) - check_len(self.db.test.find().limit(10).skip(95), 5) - check_len(self.db.test.find()[95:105], 5) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) def test_len(self): self.assertRaises(TypeError, len, self.db.test.find()) @@ -1110,14 +1003,14 @@ def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) def set_coll(): - self.db.test.find().collection = "hello" + self.db.test.find().collection = "hello" # type: ignore self.assertRaises(AttributeError, set_coll) def test_get_more(self): db = self.db db.drop_collection("test") - db.test.insert_many([{'i': i} for i in range(10)]) + db.test.insert_many([{"i": i} for i in range(10)]) self.assertEqual(10, len(list(db.test.find().batch_size(5)))) def test_tailable(self): @@ -1160,8 +1053,10 @@ def test_tailable(self): self.assertEqual(3, db.test.count_documents({})) # __getitem__(index) - for cursor in (db.test.find(cursor_type=CursorType.TAILABLE), - db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)): + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): self.assertEqual(4, cursor[0]["x"]) self.assertEqual(5, cursor[1]["x"]) self.assertEqual(6, cursor[2]["x"]) @@ -1189,8 +1084,9 @@ def test_concurrent_close(self): def iterate_cursor(): while cursor.alive: - for doc in cursor: + for _doc in cursor: pass + t = threading.Thread(target=iterate_cursor) t.start() time.sleep(1) @@ -1199,12 +1095,10 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) - def test_distinct(self): self.db.drop_collection("test") - self.db.test.insert_many( - [{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a") distinct.sort() @@ -1230,8 +1124,7 @@ def test_max_scan(self): self.assertEqual(100, len(list(self.db.test.find()))) self.assertEqual(50, len(list(self.db.test.find().max_scan(50)))) - self.assertEqual(50, len(list(self.db.test.find() - .max_scan(90).max_scan(50)))) + self.assertEqual(50, len(list(self.db.test.find().max_scan(90).max_scan(50)))) def test_with_statement(self): self.db.drop_collection("test") @@ -1248,100 +1141,34 @@ def test_with_statement(self): self.assertTrue(c1.alive) 
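The next hunk touches another PyMongo 4.0 removal: Database.set_profiling_level and the ALL/OFF constants are gone, so test_comment now drives the profiler through the generic command helper. Equivalent calls, as used below (the slowms variant is illustrative only):

    db.command("profile", 2)              # was db.set_profiling_level(ALL)
    db.command("profile", 0)              # was db.set_profiling_level(OFF)
    db.command("profile", 1, slowms=100)  # profile only ops slower than 100ms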
@client_context.require_no_mongos - @ignore_deprecations def test_comment(self): - # MongoDB 3.1.5 changed the ns for commands. - regex = {'$regex': r'pymongo_test.(\$cmd|test)'} - - if client_context.version.at_least(3, 5, 8, -1): - query_key = "command.comment" - elif client_context.version.at_least(3, 1, 8, -1): - query_key = "query.comment" - else: - query_key = "query.$comment" - self.client.drop_database(self.db) - self.db.set_profiling_level(ALL) + self.db.command("profile", 2) # Profile ALL commands. try: - list(self.db.test.find().comment('foo')) - op = self.db.system.profile.find({'ns': 'pymongo_test.test', - 'op': 'query', - query_key: 'foo'}) - self.assertEqual(op.count(), 1) - - self.db.test.find().comment('foo').count() - op = self.db.system.profile.find({'ns': regex, - 'op': 'command', - 'command.count': 'test', - 'command.comment': 'foo'}) - self.assertEqual(op.count(), 1) - - self.db.test.find().comment('foo').distinct('type') - op = self.db.system.profile.find({'ns': regex, - 'op': 'command', - 'command.distinct': 'test', - 'command.comment': 'foo'}) - self.assertEqual(op.count(), 1) + list(self.db.test.find().comment("foo")) + count = self.db.system.profile.count_documents( + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) + self.assertEqual(count, 1) + + self.db.test.find().comment("foo").distinct("type") + count = self.db.system.profile.count_documents( + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) + self.assertEqual(count, 1) finally: - self.db.set_profiling_level(OFF) + self.db.command("profile", 0) # Turn off profiling. self.db.system.profile.drop() self.db.test.insert_many([{}, {}]) cursor = self.db.test.find() next(cursor) - self.assertRaises(InvalidOperation, cursor.comment, 'hello') - - def test_modifiers(self): - c = self.db.test - - # "modifiers" is deprecated. - with ignore_deprecations(): - cur = c.find() - self.assertTrue('$query' not in cur._Cursor__query_spec()) - cur = c.find().comment("testing").max_time_ms(500) - self.assertTrue('$query' in cur._Cursor__query_spec()) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) - cur = c.find( - modifiers={"$maxTimeMS": 500, "$comment": "testing"}) - self.assertTrue('$query' in cur._Cursor__query_spec()) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) - - # Keyword arg overwrites modifier. - # If we remove the "modifiers" arg, delete this test after checking - # that TestCommandMonitoring.test_find_options covers all cases. 
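That removal has now happened: the modifiers parameter is gone in PyMongo 4.0 and each $-modifier maps to a plain keyword argument, which is why test_modifiers is deleted below. A hedged before/after:

    cur = coll.find({}, modifiers={"$comment": "hi", "$maxTimeMS": 500})  # 3.x only
    cur = coll.find({}, comment="hi", max_time_ms=500)                    # 4.x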
- cur = c.find(comment="hi", modifiers={"$comment": "bye"}) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "hi") - - cur = c.find(max_scan=1, modifiers={"$maxScan": 2}) - self.assertEqual(cur._Cursor__query_spec()["$maxScan"], 1) - - cur = c.find(max_time_ms=1, modifiers={"$maxTimeMS": 2}) - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 1) - - cur = c.find(min=1, modifiers={"$min": 2}) - self.assertEqual(cur._Cursor__query_spec()["$min"], 1) - - cur = c.find(max=1, modifiers={"$max": 2}) - self.assertEqual(cur._Cursor__query_spec()["$max"], 1) - - cur = c.find(return_key=True, modifiers={"$returnKey": False}) - self.assertEqual(cur._Cursor__query_spec()["$returnKey"], True) - - cur = c.find(hint=[("a", 1)], modifiers={"$hint": {"b": "1"}}) - self.assertEqual(cur._Cursor__query_spec()["$hint"], {"a": 1}) - - # The arg is named show_record_id after the "find" command arg, the - # modifier is named $showDiskLoc for the OP_QUERY modifier. It's - # stored as $showDiskLoc then upgraded to showRecordId if we send a - # "find" command. - cur = c.find(show_record_id=True, modifiers={"$showDiskLoc": False}) - self.assertEqual(cur._Cursor__query_spec()["$showDiskLoc"], True) - - if not client_context.version.at_least(3, 7, 3): - cur = c.find(snapshot=True, modifiers={"$snapshot": False}) - self.assertEqual(cur._Cursor__query_spec()["$snapshot"], True) + self.assertRaises(InvalidOperation, cursor.comment, "hello") def test_alive(self): self.db.test.delete_many({}) @@ -1352,7 +1179,7 @@ def test_alive(self): while True: cursor.next() n += 1 - if 3 == n: + if n == 3: self.assertFalse(cursor.alive) break @@ -1363,8 +1190,7 @@ def test_close_kills_cursor_synchronously(self): gc.collect() self.client._process_periodic_tasks() - listener = WhiteListEventListener("killCursors") - results = listener.results + listener = AllowListEventListener("killCursors") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test_close_kills_cursors @@ -1373,7 +1199,7 @@ def test_close_kills_cursor_synchronously(self): docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) - results.clear() + listener.reset() # Close a cursor while it's still open on the server. cursor = coll.find().batch_size(10) @@ -1382,14 +1208,13 @@ def test_close_kills_cursor_synchronously(self): cursor.close() def assertCursorKilled(): - self.assertEqual(1, len(results["started"])) - self.assertEqual("killCursors", results["started"][0].command_name) - self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", - results["succeeded"][0].command_name) + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) assertCursorKilled() - results.clear() + listener.reset() # Close a command cursor while it's still open on the server. 
cursor = coll.aggregate([], batchSize=10) @@ -1400,7 +1225,60 @@ def assertCursorKilled(): if cursor.cursor_id: assertCursorKilled() else: - self.assertEqual(0, len(results["started"])) + self.assertEqual(0, len(listener.started_events)) + + @client_context.require_failCommand_appName + def test_timeout_kills_cursor_asynchronously(self): + listener = AllowListEventListener("killCursors") + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + coll = client[self.db.name].test_timeout_kills_cursor + + # Add some test data. + docs_inserted = 10 + coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + cursor = coll.find({}, batch_size=1) + cursor.next() + + # Mock getMore commands timing out. + mock_timeout_errors = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "errorCode": 50, + "failCommands": ["getMore"], + }, + } + + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + def assertCursorKilled(): + wait_until( + lambda: len(listener.succeeded_events), + "find successful killCursors command", + ) + + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + cursor = coll.aggregate([], batchSize=1) + cursor.next() + + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + assertCursorKilled() def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ @@ -1408,41 +1286,111 @@ def test_delete_not_initialized(self): cursor = Cursor.__new__(Cursor) # Skip calling __init__ cursor.__del__() # no error - @client_context.require_version_min(3, 6) def test_getMore_does_not_send_readPreference(self): - listener = WhiteListEventListener('find', 'getMore') - client = rs_or_single_client( - event_listeners=[listener]) + listener = AllowListEventListener("find", "getMore") + client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - coll = client[self.db.name].test + # We never send primary read preference so override the default. 
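Context for the override that follows: the driver only attaches $readPreference for non-primary modes, so leaving the collection on the default PRIMARY would make the first assertion vacuous. The shape being asserted, roughly (db_name stands in for the test database name):

    coll = client[db_name].get_collection(
        "test", read_preference=ReadPreference.PRIMARY_PREFERRED)
    list(coll.find(batch_size=3))
    # find    -> includes {"$readPreference": {"mode": "primaryPreferred"}}
    #            on replica sets and mongos
    # getMore -> never includes $readPreference; it is pinned to the server
    #            that created the cursor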
+ coll = client[self.db.name].get_collection( + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) self.addCleanup(coll.drop) list(coll.find(batch_size=3)) - started = listener.results['started'] + started = listener.started_events self.assertEqual(2, len(started)) - self.assertEqual('find', started[0].command_name) - self.assertIn('$readPreference', started[0].command) - self.assertEqual('getMore', started[1].command_name) - self.assertNotIn('$readPreference', started[1].command) + self.assertEqual("find", started[0].command_name) + if client_context.is_rs or client_context.is_mongos: + self.assertIn("$readPreference", started[0].command) + else: + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.find_raw_batches().sort('_id')) + batches = list(c.find_raw_batches().sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - def test_manipulate(self): + @client_context.require_transactions + def test_find_raw_transaction(self): c = self.db.test - with self.assertRaises(InvalidOperation): - c.find_raw_batches(manipulate=True) + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = list( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ) + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. 
+ last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_find_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.find_raw_batches().sort("_id")) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: + self.assertEqual(cmd.command_name, "find") + + @client_context.require_version_min(5, 0, 0) + @client_context.require_no_standalone + def test_find_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct("x", {}, session=session) + batches = list(db.test.find_raw_batches(session=session).sort("_id")) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) def test_explain(self): c = self.db.test @@ -1450,7 +1398,14 @@ def test_explain(self): explanation = c.find_raw_batches().explain() self.assertIsInstance(explanation, dict) + def test_empty(self): + self.db.test.drop() + cursor = self.db.test.find_raw_batches() + with self.assertRaises(StopIteration): + next(cursor) + def test_clone(self): + self.db.test.insert_one({}) cursor = self.db.test.find_raw_batches() # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. self.assertIsInstance(next(cursor.clone()), bytes) @@ -1460,13 +1415,13 @@ def test_clone(self): def test_exhaust(self): c = self.db.test c.drop() - c.insert_many({'_id': i} for i in range(200)) - result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) - self.assertEqual([{'_id': i} for i in range(200)], decode_all(result)) + c.insert_many({"_id": i} for i in range(200)) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) def test_server_error(self): with self.assertRaises(OperationFailure) as exc: - next(self.db.test.find_raw_batches({'x': {'$bad': 1}})) + next(self.db.test.find_raw_batches({"x": {"$bad": 1}})) # The server response was decoded, not left raw. 
self.assertIsInstance(exc.exception.details, dict) @@ -1475,70 +1430,56 @@ def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.find_raw_batches()[0] - @client_context.require_version_min(3, 4) def test_collation(self): - next(self.db.test.find_raw_batches(collation=Collation('en_US'))) - - @client_context.require_version_max(3, 2) - def test_collation_error(self): - with self.assertRaises(ConfigurationError): - next(self.db.test.find_raw_batches(collation=Collation('en_US'))) + next(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @client_context.require_version_min(3, 2) + @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): + self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) - @client_context.require_version_max(3, 1) - def test_read_concern_error(self): - c = self.db.get_collection("test", read_concern=ReadConcern("majority")) - with self.assertRaises(ConfigurationError): - next(c.find_raw_batches()) - def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. next(cursor) - started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('find', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('find', succeeded.command_name) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # The batch is a list of one raw bytes object. self.assertEqual(len(csr["firstBatch"]), 1) - self.assertEqual(decode_all(csr["firstBatch"][0]), - [{'_id': i} for i in range(0, 4)]) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(4)]) - listener.results.clear() + listener.reset() # Next raw batch of 4 documents. next(cursor) try: - results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) - self.assertEqual(decode_all(csr["nextBatch"][0]), - [{'_id': i} for i in range(4, 8)]) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) finally: # Finish the cursor. 
tuple(cursor) @@ -1547,30 +1488,109 @@ def test_monitoring(self): class TestRawBatchCommandCursor(IntegrationTest): @classmethod def setUpClass(cls): - super(TestRawBatchCommandCursor, cls).setUpClass() + super().setUpClass() def test_aggregate_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}])) + batches = list(c.aggregate_raw_batches([{"$sort": {"_id": 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) + @client_context.require_transactions + def test_aggregate_raw_transaction(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = list( + client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ) + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_aggregate_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}])) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_standalone + def test_aggregate_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct("x", {}, session=session) + batches = list(db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session)) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + def test_server_error(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in 
range(10)] c.insert_many(docs) - c.insert_one({'_id': 10, 'x': 'not a number'}) + c.insert_one({"_id": 10, "x": "not a number"}) with self.assertRaises(OperationFailure) as exc: - list(self.db.test.aggregate_raw_batches([{ - '$sort': {'_id': 1}, - }, { - '$project': {'x': {'$multiply': [2, '$x']}} - }], batchSize=4)) + list( + self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, + ], + batchSize=4, + ) + ) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) @@ -1579,58 +1599,72 @@ def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.aggregate_raw_batches([])[0] - @client_context.require_version_min(3, 4) def test_collation(self): - next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) - - @client_context.require_version_max(3, 2) - def test_collation_error(self): - with self.assertRaises(ConfigurationError): - next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) + next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() - cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4) + listener.reset() + cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. - started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('aggregate', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('aggregate', succeeded.command_name) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # First batch is empty. self.assertEqual(len(csr["firstBatch"]), 0) - listener.results.clear() + listener.reset() # Batches of 4 documents. 
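As the loop below shows, a raw-batches cursor yields one undecoded bytes blob per server batch (here up to four documents each), leaving BSON decoding to the caller. Typical consumption pattern, sketched:

    for batch in coll.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4):
        docs = decode_all(batch)  # decode here, or ship the raw bytes elsewhere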
n = 0 for batch in cursor: - results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) self.assertEqual(csr["nextBatch"][0], batch) - self.assertEqual(decode_all(batch), - [{'_id': i} for i in range(n, min(n + 4, 10))]) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 - listener.results.clear() + listener.reset() + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_mongos + def test_exhaust_cursor_db_set(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + c = client.pymongo_test.test + c.delete_many({}) + c.insert_many([{"_id": i} for i in range(3)]) + + listener.reset() + + result = list(c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) + + self.assertEqual(len(result), 3) + + self.assertEqual( + listener.started_command_names(), ["find", "getMore", "getMore", "getMore"] + ) + for cmd in listener.started_events: + self.assertEqual(cmd.command["$db"], "pymongo_test") if __name__ == "__main__": diff --git a/test/test_cursor_manager.py b/test/test_cursor_manager.py deleted file mode 100644 index 1b0114028d..0000000000 --- a/test/test_cursor_manager.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the cursor_manager module.""" - -import sys -import warnings - -sys.path[0:0] = [""] - -from pymongo.cursor_manager import CursorManager -from pymongo.errors import CursorNotFound -from pymongo.message import _CursorAddress -from test import (client_context, - client_knobs, - unittest, - IntegrationTest, - SkipTest) -from test.utils import rs_or_single_client, wait_until - - -class TestCursorManager(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestCursorManager, cls).setUpClass() - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - cls.collection = cls.db.test - cls.collection.drop() - - # Ensure two batches. 
- cls.collection.insert_many([{'_id': i} for i in range(200)]) - - @classmethod - def tearDownClass(cls): - cls.warn_context.__exit__() - cls.warn_context = None - cls.collection.drop() - - def test_cursor_manager_validation(self): - with self.assertRaises(TypeError): - client_context.client.set_cursor_manager(1) - - def test_cursor_manager(self): - self.close_was_called = False - - test_case = self - - class CM(CursorManager): - def __init__(self, client): - super(CM, self).__init__(client) - - def close(self, cursor_id, address): - test_case.close_was_called = True - super(CM, self).close(cursor_id, address) - - with client_knobs(kill_cursor_frequency=0.01): - client = rs_or_single_client(maxPoolSize=1) - client.set_cursor_manager(CM) - - # Create a cursor on the same client so we're certain the getMore - # is sent after the killCursors message. - cursor = client.pymongo_test.test.find().batch_size(1) - next(cursor) - client.close_cursor( - cursor.cursor_id, - _CursorAddress(self.client.address, self.collection.full_name)) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - self.assertTrue(self.close_was_called) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index ba0bb0ca69..da4bf03344 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -13,6 +13,7 @@ # limitations under the License. """Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations import datetime import sys @@ -20,37 +21,41 @@ from collections import OrderedDict from decimal import Decimal from random import random +from typing import Any, Tuple, Type, no_type_check sys.path[0:0] = [""] -from bson import (Decimal128, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - RE_TYPE, - _BUILT_IN_TYPES, - _dict_to_bson, - _bson_to_dict) -from bson.code import Code -from bson.codec_options import (CodecOptions, TypeCodec, TypeDecoder, - TypeEncoder, TypeRegistry) +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import rs_client + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument -from bson.py3compat import text_type - from gridfs import GridIn, GridOut - from pymongo.collection import ReturnDocument from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import ignore_deprecations, rs_client - class DecimalEncoder(TypeEncoder): @property @@ -74,11 +79,10 @@ class DecimalCodec(DecimalDecoder, DecimalEncoder): pass -DECIMAL_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([DecimalCodec()])) +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) -class UndecipherableInt64Type(object): +class UndecipherableInt64Type: def __init__(self, value): self.value = value @@ -91,120 +95,138 @@ def __eq__(self, other): class UndecipherableIntDecoder(TypeDecoder): bson_type = Int64 + def transform_bson(self, value): return 
UndecipherableInt64Type(value) class UndecipherableIntEncoder(TypeEncoder): python_type = UndecipherableInt64Type + def transform_python(self, value): return Int64(value.value) UNINT_DECODER_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder(), ])) + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) -UNINT_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UndecipherableIntDecoder(), UndecipherableIntEncoder()])) +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) class UppercaseTextDecoder(TypeDecoder): - bson_type = text_type + bson_type = str + def transform_bson(self, value): return value.upper() -UPPERSTR_DECODER_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UppercaseTextDecoder(),])) +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) def type_obfuscating_decoder_factory(rt_type): class ResumeTokenToNanDecoder(TypeDecoder): bson_type = rt_type + def transform_bson(self, value): return "NaN" + return ResumeTokenToNanDecoder -class CustomBSONTypeTests(object): +class CustomBSONTypeTests: + @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) rt_document = decode(bsonbytes, codec_options=self.codecopts) self.assertEqual(doc, rt_document) def test_encode_decode_roundtrip(self): - self.roundtrip({'average': Decimal('56.47')}) - self.roundtrip({'average': {'b': Decimal('56.47')}}) - self.roundtrip({'average': [Decimal('56.47')]}) - self.roundtrip({'average': [[Decimal('56.47')]]}) - self.roundtrip({'average': [{'b': Decimal('56.47')}]}) + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) + @no_type_check def test_decode_all(self): documents = [] for dec in range(3): - documents.append({'average': Decimal('56.4%s' % (dec,))}) + documents.append({"average": Decimal(f"56.4{dec}")}) - bsonstream = bytes() + bsonstream = b"" for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) - self.assertEqual( - decode_all(bsonstream, self.codecopts), documents) + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) + @no_type_check def test__bson_to_dict(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) decoded_document = _bson_to_dict(rawbytes, self.codecopts) self.assertEqual(document, decoded_document) + @no_type_check def test__dict_to_bson(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) encoded_document = _dict_to_bson(document, False, self.codecopts) self.assertEqual(encoded_document, rawbytes) def _generate_multidocument_bson_stream(self): inp_num = [str(random() * 100)[:4] for _ in range(10)] - docs = [{'n': Decimal128(dec)} for dec in inp_num] - edocs = [{'n': Decimal(dec)} for dec in inp_num] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] bsonstream = b"" for doc in docs: bsonstream += encode(doc) return edocs, bsonstream + @no_type_check def test_decode_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() - for expected_doc, 
decoded_doc in zip( - expected, decode_iter(bson_data, self.codecopts)): + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) + @no_type_check def test_decode_file_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() fileobj = tempfile.TemporaryFile() fileobj.write(bson_data) fileobj.seek(0) - for expected_doc, decoded_doc in zip( - expected, decode_file_iter(fileobj, self.codecopts)): + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) fileobj.close() -class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): cls.codecopts = DECIMAL_CODECOPTS -class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): codec_options = CodecOptions( - type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder()))) + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) cls.codecopts = codec_options @@ -215,29 +237,29 @@ def _get_codec_options(self, fallback_encoder): def test_simple(self): codecopts = self._get_codec_options(lambda x: Decimal128(x)) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} bsonbytes = encode(document, codec_options=codecopts) - exp_document = {'average': Decimal128('56.47')} + exp_document = {"average": Decimal128("56.47")} exp_bsonbytes = encode(exp_document) self.assertEqual(bsonbytes, exp_bsonbytes) def test_erroring_fallback_encoder(self): - codecopts = self._get_codec_options(lambda _: 1/0) + codecopts = self._get_codec_options(lambda _: 1 / 0) # fallback converter should not be invoked when encoding known types. encode( - {'a': 1, 'b': Decimal128('1.01'), 'c': {'arr': ['abc', 3.678]}}, - codec_options=codecopts) + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) # expect an error when encoding a custom type. 
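For context on the fallback-encoder tests in this hunk: a fallback encoder is consulted only for values the type registry cannot otherwise encode, which is why the erroring fallback above is never reached for built-in types. A short sketch, with a Decimal-to-Decimal128 conversion standing in for the lambdas used in the tests:

    from decimal import Decimal

    from bson import Decimal128, encode
    from bson.codec_options import CodecOptions, TypeRegistry

    def fallback(value):
        # Invoked only when BSON has no native encoding for `value`.
        return Decimal128(str(value))

    opts = CodecOptions(type_registry=TypeRegistry(fallback_encoder=fallback))
    encode({"average": Decimal("56.47")}, codec_options=opts)  # fallback used
    encode({"n": 1, "s": "abc"}, codec_options=opts)  # native types; fallback skipped
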
- document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(ZeroDivisionError): encode(document, codec_options=codecopts) def test_noop_fallback_encoder(self): codecopts = self._get_codec_options(lambda x: x) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(InvalidDocument): encode(document, codec_options=codecopts) @@ -247,44 +269,84 @@ def fallback_encoder(value): return Decimal128(value) except: raise TypeError("cannot encode type %s" % (type(value))) + codecopts = self._get_codec_options(fallback_encoder) - document = {'average': Decimal} + document = {"average": Decimal} with self.assertRaises(TypeError): encode(document, codec_options=codecopts) + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): - msg = "Can't instantiate abstract class .* with abstract methods .*" + msg = "Can't instantiate abstract class" + def run_test(base, attrs, fail): - codec = type('testcodec', (base,), attrs) + codec = type("testcodec", (base,), attrs) if fail: with self.assertRaisesRegex(TypeError, msg): codec() else: codec() - class MyType(object): + class MyType: pass - run_test(TypeEncoder, {'python_type': MyType,}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x, - 'python_type': MyType}, fail=False) - - run_test(TypeDecoder, {'bson_type': Decimal128, }, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x}, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) - - run_test(TypeCodec, {'bson_type': Decimal128, - 'python_type': MyType}, fail=True) - run_test(TypeCodec, {'transform_bson': lambda s, x: x, - 'transform_python': lambda s, x: x}, fail=True) - run_test(TypeCodec, {'python_type': MyType, - 'transform_python': lambda s, x: x, - 'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) def test_type_checks(self): self.assertTrue(issubclass(TypeCodec, TypeEncoder)) @@ -294,13 +356,22 @@ def test_type_checks(self): class 
TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + @classmethod def setUpClass(cls): - class TypeA(object): + class TypeA: def __init__(self, x): self.value = x - class TypeB(object): + class TypeB: def __init__(self, x): self.value = x @@ -317,6 +388,7 @@ def fallback_encoder_A2BSON(value): # transforms B into something encodable class B2BSON(TypeEncoder): python_type = TypeB + def transform_python(self, value): return value.value @@ -325,6 +397,7 @@ def transform_python(self, value): # BSON-encodable. class A2B(TypeEncoder): python_type = TypeA + def transform_python(self, value): return TypeB(value.value) @@ -333,6 +406,7 @@ def transform_python(self, value): # BSON-encodable. class B2A(TypeEncoder): python_type = TypeB + def transform_python(self, value): return TypeA(value.value) @@ -345,48 +419,52 @@ def transform_python(self, value): cls.A2B = A2B def test_encode_fallback_then_custom(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B)) - testdoc = {'x': self.TypeA(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_encode_custom_then_fallback(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON)) - testdoc = {'x': self.TypeB(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_chaining_encoders_fails(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.A2B(), self.B2BSON()])) + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) with self.assertRaises(InvalidDocument): - encode({'x': self.TypeA(123)}, codec_options=codecopts) + encode({"x": self.TypeA(123)}, codec_options=codecopts) def test_infinite_loop_exceeds_max_recursion_depth(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2B)) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) # Raises max recursion depth exceeded error with self.assertRaises(RuntimeError): - encode({'x': self.TypeA(100)}, codec_options=codecopts) + encode({"x": self.TypeA(100)}, codec_options=codecopts) class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + @classmethod def setUpClass(cls): - class MyIntType(object): + class MyIntType: def __init__(self, x): assert isinstance(x, int) self.x = x - class MyStrType(object): + class MyStrType: def __init__(self, x): assert isinstance(x, str) self.x = x @@ -430,29 +508,34 @@ def fallback_encoder(value): def 
test_simple(self): codec_instances = [codec() for codec in self.codecs] + def assert_proper_initialization(type_registry, codec_instances): - self.assertEqual(type_registry._encoder_map, { - self.types[0]: codec_instances[0].transform_python, - self.types[1]: codec_instances[1].transform_python}) - self.assertEqual(type_registry._decoder_map, { - int: codec_instances[0].transform_bson, - str: codec_instances[1].transform_bson}) self.assertEqual( - type_registry._fallback_encoder, self.fallback_encoder) + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) type_registry = TypeRegistry(codec_instances, self.fallback_encoder) assert_proper_initialization(type_registry, codec_instances) type_registry = TypeRegistry( - fallback_encoder=self.fallback_encoder, type_codecs=codec_instances) + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) assert_proper_initialization(type_registry, codec_instances) # Ensure codec list held by the type registry doesn't change if we # mutate the initial list. codec_instances_copy = list(codec_instances) codec_instances.pop(0) - self.assertListEqual( - type_registry._TypeRegistry__type_codecs, codec_instances_copy) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) def test_simple_separate_codecs(self): class MyIntEncoder(TypeEncoder): @@ -467,77 +550,87 @@ class MyIntDecoder(TypeDecoder): def transform_bson(self, value): return self.types[0](value) - codec_instances = [MyIntDecoder(), MyIntEncoder()] + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] type_registry = TypeRegistry(codec_instances) self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}) + {MyIntEncoder.python_type: codec_instances[1].transform_python}, + ) self.assertEqual( type_registry._decoder_map, - {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, + ) def test_initialize_fail(self): - err_msg = ("Expected an instance of TypeEncoder, TypeDecoder, " - "or TypeCodec, got .* instead") + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(self.codecs) + TypeRegistry(self.codecs) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([type('AnyType', (object,), {})()]) + TypeRegistry([type("AnyType", (object,), {})()]) - err_msg = "fallback_encoder %r is not a callable" % (True,) + err_msg = f"fallback_encoder {True!r} is not a callable" with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([], True) + TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ('hello',) + err_msg = "fallback_encoder {!r} is not a callable".format("hello") with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(fallback_encoder='hello') + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = ("TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % ( - codec_instances, None)) + r = 
f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): codec_instances = [codec() for codec in self.codecs] - self.assertEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) codec_instances_2 = [codec() for codec in self.codecs] - self.assertNotEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) def test_builtin_types_override_fails(self): def run_test(base, attrs): - msg = (r"TypeEncoders cannot change how built-in types " - r"are encoded \(encoder .* transforms type .*\)") + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) for pytype in _BUILT_IN_TYPES: - attrs.update({'python_type': pytype, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) # Test only some subtypes as not all can be subclassed. - if pytype in [bool, type(None), RE_TYPE,]: + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: continue - class MyType(pytype): + class MyType(pytype): # type: ignore pass - attrs.update({'python_type': MyType, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) run_test(TypeEncoder, {}) - run_test(TypeCodec, {'bson_type': Decimal128, - 'transform_bson': lambda x: x}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) class TestCollectionWCustomType(IntegrationTest): @@ -547,179 +640,138 @@ def setUp(self): def tearDown(self): self.db.test.drop() + def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + collection.insert_one({"_id": 1, "data": 2**520}) + ret = collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + def test_command_errors_w_custom_type_decoder(self): db = self.db - test_doc = {'_id': 1, 'data': 'a'} - test = db.get_collection('test', - codec_options=UNINT_DECODER_CODECOPTS) + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) result = test.insert_one(test_doc) - self.assertEqual(result.inserted_id, test_doc['_id']) + self.assertEqual(result.inserted_id, test_doc["_id"]) with self.assertRaises(DuplicateKeyError): test.insert_one(test_doc) def test_find_w_custom_type_decoder(self): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) for doc in test.find({}, batch_size=1): - 
self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) def test_find_w_custom_type_decoder_and_document_class(self): def run_test(doc_cls): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection('test', codec_options=CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder()]), - document_class=doc_cls)) + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) for doc in test.find({}, batch_size=1): self.assertIsInstance(doc, doc_cls) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) for doc_cls in [RawBSONDocument, OrderedDict]: run_test(doc_cls) - @client_context.require_version_max(4, 1, 0, -1) - def test_group_w_custom_type(self): - db = self.db - test = db.get_collection('test', codec_options=UNINT_CODECOPTS) - test.insert_many([ - {'sku': 'a', 'qty': UndecipherableInt64Type(2)}, - {'sku': 'b', 'qty': UndecipherableInt64Type(5)}, - {'sku': 'a', 'qty': UndecipherableInt64Type(1)}]) - - self.assertEqual([{'sku': 'b', 'qty': UndecipherableInt64Type(5)},], - test.group(["sku", "qty"], {"sku": "b"}, {}, - "function (obj, prev) { }")) - def test_aggregate_w_custom_type_decoder(self): db = self.db - db.test.insert_many([ - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)},]) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) - - pipeline = [ - {'$match': {'status': 'complete'}}, - {'$group': {'_id': "$status", 'total_qty': {"$sum": "$qty"}}},] + db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + pipeline: list = [ + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] result = test.aggregate(pipeline) res = list(result)[0] - self.assertEqual(res['_id'], 'complete') - self.assertIsInstance(res['total_qty'], UndecipherableInt64Type) - self.assertEqual(res['total_qty'].value, 20) + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) def test_distinct_w_custom_type(self): self.db.drop_collection("test") - test = self.db.get_collection('test', codec_options=UNINT_CODECOPTS) + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) values = [ UndecipherableInt64Type(1), UndecipherableInt64Type(2), UndecipherableInt64Type(3), - {"b": UndecipherableInt64Type(3)}] + {"b": UndecipherableInt64Type(3)}, + ] test.insert_many({"a": val} for val in values) self.assertEqual(values, test.distinct("a")) - def test_map_reduce_w_custom_type(self): - test = self.db.get_collection( - 'test', codec_options=UPPERSTR_DECODER_CODECOPTS) - - test.insert_many([ - {'_id': 1, 'sku': 'abcd', 'qty': 1}, - {'_id': 2, 'sku': 'abcd', 'qty': 2}, - {'_id': 3, 'sku': 'abcd', 'qty': 3}]) - - map = Code("function 
() {" - " emit(this.sku, this.qty);" - "}") - reduce = Code("function (key, values) {" - " return Array.sum(values);" - "}") - - result = test.map_reduce(map, reduce, out={'inline': 1}) - self.assertTrue(isinstance(result, dict)) - self.assertTrue('results' in result) - self.assertEqual(result['results'][0], {'_id': 'ABCD', 'value': 6}) - - result = test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(1, len(result)) - self.assertEqual(result[0]["_id"], 'ABCD') - - full_result = test.inline_map_reduce(map, reduce, - full_response=True) - result = full_result['results'] - self.assertTrue(isinstance(result, list)) - self.assertEqual(1, len(result)) - self.assertEqual(result[0]["_id"], 'ABCD') - def test_find_one_and__w_custom_type_decoder(self): db = self.db - c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) - c.insert_one({'_id': 1, 'x': Int64(1)}) - - doc = c.find_one_and_update({'_id': 1}, {'$inc': {'x': 1}}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 2) - - doc = c.find_one_and_replace({'_id': 1}, {'x': Int64(3), 'y': True}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) - self.assertEqual(doc['y'], True) - - doc = c.find_one_and_delete({'y': True}) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) self.assertIsNone(c.find_one()) - @ignore_deprecations - def test_find_and_modify_w_custom_type_decoder(self): - db = self.db - c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) - c.insert_one({'_id': 1, 'x': Int64(1)}) - - doc = c.find_and_modify({'_id': 1}, {'$inc': {'x': Int64(10)}}) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 1) - - doc = c.find_one() - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 11) - class TestGridFileCustomType(IntegrationTest): def setUp(self): - self.db.drop_collection('fs.files') - self.db.drop_collection('fs.chunks') + self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") def test_grid_out_custom_opts(self): db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) - one = GridIn(db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 'red', "bar": 'blue'}, bar=3, - baz="hello") + one = GridIn( + db.fs, + _id=5, + 
filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -733,25 +785,36 @@ def test_grid_out_custom_opts(self): self.assertEqual(1000, two.chunk_size) self.assertTrue(isinstance(two.upload_date, datetime.datetime)) self.assertEqual(["foo"], two.aliases) - self.assertEqual({"foo": 'red', "bar": 'blue'}, two.metadata) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5) - - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) -class ChangeStreamsWCustomTypesTestMixin(object): +class ChangeStreamsWCustomTypesTestMixin: + @no_type_check def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) - def insert_and_check(self, change_stream, insert_doc, - expected_doc): + @no_type_check + def insert_and_check(self, change_stream, insert_doc, expected_doc): self.input_target.insert_one(insert_doc) change = next(change_stream) - self.assertEqual(change['fullDocument'], expected_doc) + self.assertEqual(change["fullDocument"], expected_doc) + @no_type_check def kill_change_stream_cursor(self, change_stream): # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor @@ -759,19 +822,23 @@ def kill_change_stream_cursor(self, change_stream): client = self.input_target.database.client client._close_cursor_now(cursor.cursor_id, address) + @no_type_check def test_simple(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'},] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] expected_docs = [ - {'_id': 1, 'data': 'HELLO'}, - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'},] + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] change_stream = self.change_stream() @@ -781,46 +848,48 @@ def test_simple(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + @no_type_check def test_custom_type_in_pipeline(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'}] - expected_docs = [ - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'}] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + 
expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] # UndecipherableInt64Type should be encoded with the TypeRegistry. change_stream = self.change_stream( - [{'$match': {'documentKey._id': { - '$gte': UndecipherableInt64Type(2)}}}]) + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) self.input_target.insert_one(input_docs[0]) self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + @no_type_check def test_break_resume_token(self): # Get one document from a change stream to determine resumeToken type. self.create_targets() change_stream = self.change_stream() self.input_target.insert_one({"data": "test"}) change = next(change_stream) - resume_token_decoder = type_obfuscating_decoder_factory( - type(change['_id']['_data'])) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) # Custom-decoding the resumeToken type breaks resume tokens. - codecopts = CodecOptions(type_registry=TypeRegistry([ - resume_token_decoder(), UndecipherableIntEncoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) # Re-create targets, change stream and proceed. self.create_targets(codec_options=codecopts) - docs = [{'_id': 1}, {'_id': 2}, {'_id': 3}] + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] change_stream = self.change_stream() self.insert_and_check(change_stream, docs[0], docs[0]) @@ -829,91 +898,88 @@ def test_break_resume_token(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, docs[2], docs[2]) + @no_type_check def test_document_class(self): def run_test(doc_cls): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UppercaseTextDecoder(), UndecipherableIntEncoder()]), - document_class=doc_cls) + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) self.create_targets(codec_options=codecopts) change_stream = self.change_stream() - doc = {'a': UndecipherableInt64Type(101), 'b': 'xyz'} + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} self.input_target.insert_one(doc) change = next(change_stream) self.assertIsInstance(change, doc_cls) - self.assertEqual(change['fullDocument']['a'], 101) - self.assertEqual(change['fullDocument']['b'], 'XYZ') + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") for doc_cls in [OrderedDict, RawBSONDocument]: run_test(doc_cls) -class TestCollectionChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod - @client_context.require_version_min(3, 6, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() def create_targets(self, *args, **kwargs): - self.watched_target = self.db.get_collection( - 'test', *args, **kwargs) + self.watched_target = self.db.get_collection("test", *args, **kwargs) self.input_target = self.watched_target # Ensure the collection exists and is empty. 
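As a standalone illustration of what this mixin drives through collection, database, and cluster targets: a change stream opened under codec options whose TypeRegistry carries a custom decoder, so fullDocument values come back transformed. A hedged sketch, assuming a replica set (change streams require one):

    from bson.codec_options import CodecOptions, TypeDecoder, TypeRegistry
    from pymongo import MongoClient

    class UpperStrDecoder(TypeDecoder):
        bson_type = str

        def transform_bson(self, value):
            return value.upper()

    opts = CodecOptions(type_registry=TypeRegistry([UpperStrDecoder()]))
    client = MongoClient()  # assumed replica-set deployment
    coll = client.pymongo_test.get_collection("test", codec_options=opts)
    with coll.watch() as stream:
        coll.insert_one({"data": "hello"})
        change = next(stream)
        # Insert events carry fullDocument, decoded with the custom registry.
        assert change["fullDocument"]["data"] == "HELLO"
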
self.input_target.insert_one({}) self.input_target.delete_many({}) -class TestDatabaseChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() self.client.drop_database(self.watched_target) def create_targets(self, *args, **kwargs): - self.watched_target = self.client.get_database( - self.db.name, *args, **kwargs) + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) self.input_target = self.watched_target.test # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) -class TestClusterChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() self.client.drop_database(self.db) def create_targets(self, *args, **kwargs): - codec_options = kwargs.pop('codec_options', None) + codec_options = kwargs.pop("codec_options", None) if codec_options: - kwargs['type_registry'] = codec_options.type_registry - kwargs['document_class'] = codec_options.document_class + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class self.watched_target = rs_client(*args, **kwargs) + self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py new file mode 100644 index 0000000000..283ef074cb --- /dev/null +++ b/test/test_data_lake.py @@ -0,0 +1,125 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test Atlas Data Lake.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.crud_v2_format import TestCrudV2 +from test.utils import ( + OvertCommandListener, + SpecTestCreator, + rs_client_noauth, + rs_or_single_client, +) + +# Location of JSON test specifications. 
+_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") + + +class TestDataLakeMustConnect(unittest.TestCase): + def test_connected_to_data_lake(self): + data_lake = os.environ.get("TEST_DATA_LAKE") + if not data_lake: + self.skipTest("TEST_DATA_LAKE is not set") + + self.assertTrue( + client_context.is_data_lake and client_context.connected, + "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), + ) + + +class TestDataLakeProse(IntegrationTest): + # Default test database and collection names. + TEST_DB = "test" + TEST_COLLECTION = "driverdata" + + @classmethod + @client_context.require_data_lake + def setUpClass(cls): + super().setUpClass() + + # Test killCursors + def test_1(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) + next(cursor) + + # find command assertions + find_cmd = listener.succeeded_events[-1] + self.assertEqual(find_cmd.command_name, "find") + cursor_id = find_cmd.reply["cursor"]["id"] + cursor_ns = find_cmd.reply["cursor"]["ns"] + + # killCursors command assertions + cursor.close() + started = listener.started_events[-1] + self.assertEqual(started.command_name, "killCursors") + succeeded = listener.succeeded_events[-1] + self.assertEqual(succeeded.command_name, "killCursors") + + self.assertIn(cursor_id, started.command["cursors"]) + target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) + self.assertEqual(cursor_ns, target_ns) + + self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) + + # Test no auth + def test_2(self): + client = rs_client_noauth() + client.admin.command("ping") + + # Test with auth + def test_3(self): + for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: + client = rs_or_single_client(authMechanism=mechanism) + client[self.TEST_DB][self.TEST_COLLECTION].find_one() + + +class DataLakeTestSpec(TestCrudV2): + # Default test database and collection names. + TEST_DB = "test" + TEST_COLLECTION = "driverdata" + + @classmethod + @client_context.require_data_lake + def setUpClass(cls): + super().setUpClass() + + def setup_scenario(self, scenario_def): + # Spec tests MUST NOT insert data/drop collection for + # data lake testing. + pass + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +SpecTestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_database.py b/test/test_database.py index 76a549c95f..b141bb35fb 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -13,62 +13,52 @@ # limitations under the License. 
"""Test the database module.""" +from __future__ import annotations -import datetime import re import sys -import warnings +from typing import Any, Iterable, List, Mapping, Union + +from pymongo.command_cursor import CommandCursor sys.path[0:0] = [""] -from bson.code import Code +from test import IntegrationTest, client_context, unittest +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + OvertCommandListener, + rs_or_single_client, + wait_until, +) + from bson.codec_options import CodecOptions -from bson.int64 import Int64 -from bson.regex import Regex from bson.dbref import DBRef +from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.py3compat import string_type, text_type, PY3 +from bson.regex import Regex from bson.son import SON -from pymongo import (ALL, - auth, - OFF, - SLOW_ONLY, - helpers) +from pymongo import auth, helpers from pymongo.collection import Collection from pymongo.database import Database -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ExecutionTimeout, - InvalidName, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + CollectionInvalid, + ExecutionTimeout, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.saslprep import HAVE_STRINGPREP from pymongo.write_concern import WriteConcern -from test import (client_context, - SkipTest, - unittest, - IntegrationTest) -from test.utils import (ignore_deprecations, - remove_all_users, - rs_or_single_client_noauth, - rs_or_single_client, - server_started_with_auth, - wait_until, - IMPOSSIBLE_WRITE_CONCERN, - OvertCommandListener) -from test.test_custom_types import DECIMAL_CODECOPTS - - -if PY3: - long = int class TestDatabaseNoConnect(unittest.TestCase): - """Test Database features on a client that does not connect. 
- """ + """Test Database features on a client that does not connect.""" + + client: MongoClient @classmethod def setUpClass(cls): @@ -79,18 +69,17 @@ def test_name(self): self.assertRaises(InvalidName, Database, self.client, "my db") self.assertRaises(InvalidName, Database, self.client, 'my"db') self.assertRaises(InvalidName, Database, self.client, "my\x00db") - self.assertRaises(InvalidName, Database, - self.client, u"my\u0000db") + self.assertRaises(InvalidName, Database, self.client, "my\u0000db") self.assertEqual("name", Database(self.client, "name").name) def test_get_collection(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - read_concern = ReadConcern('majority') + read_concern = ReadConcern("majority") coll = self.client.pymongo_test.get_collection( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern, - read_concern) - self.assertEqual('foo', coll.name) + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) self.assertEqual(codec_options, coll.codec_options) self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) self.assertEqual(write_concern, coll.write_concern) @@ -98,7 +87,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db['_does_not_exist'], Collection)) + self.assertTrue(isinstance(db["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -106,24 +95,41 @@ def test_getattr(self): # Message should be: "AttributeError: Database has no attribute # '_does_not_exist'. To access the _does_not_exist collection, # use database['_does_not_exist']". - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): - self.assertRaises(TypeError, next, self.client.pymongo_test) + db = self.client.pymongo_test + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): + msg = "'NoneType' object is not callable" + else: + msg = "'Database' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in db: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = db[0] + # next fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = next(db) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = db.next() + # Do not implement typing.Iterable. 
+ self.assertNotIsInstance(db, Iterable) class TestDatabase(IntegrationTest): - def test_equality(self): - self.assertNotEqual(Database(self.client, "test"), - Database(self.client, "mike")) - self.assertEqual(Database(self.client, "test"), - Database(self.client, "test")) + self.assertNotEqual(Database(self.client, "test"), Database(self.client, "mike")) + self.assertEqual(Database(self.client, "test"), Database(self.client, "test")) # Explicitly test inequality - self.assertFalse(Database(self.client, "test") != - Database(self.client, "test")) + self.assertFalse(Database(self.client, "test") != Database(self.client, "test")) + + def test_hashable(self): + self.assertIn(self.client.test, {Database(self.client, "test")}) def test_get_coll(self): db = Database(self.client, "pymongo_test") @@ -133,9 +139,10 @@ def test_get_coll(self): self.assertEqual(db.test.mike, db["test.mike"]) def test_repr(self): - self.assertEqual(repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, - repr(u"pymongo_test"))) + self.assertEqual( + repr(Database(self.client, "pymongo_test")), + "Database({!r}, {})".format(self.client, repr("pymongo_test")), + ) def test_create_collection(self): db = Database(self.client, "pymongo_test") @@ -150,28 +157,30 @@ def test_create_collection(self): self.assertRaises(InvalidName, db.create_collection, "coll..ection") test = db.create_collection("test") - self.assertTrue(u"test" in db.list_collection_names()) - test.insert_one({"hello": u"world"}) - self.assertEqual(db.test.find_one()["hello"], "world") + self.assertTrue("test" in db.list_collection_names()) + test.insert_one({"hello": "world"}) + self.assertEqual(db.test.find_one()["hello"], "world") # type: ignore db.drop_collection("test.foo") db.create_collection("test.foo") - self.assertTrue(u"test.foo" in db.list_collection_names()) + self.assertTrue("test.foo" in db.list_collection_names()) self.assertRaises(CollectionInvalid, db.create_collection, "test.foo") - def _test_collection_names(self, meth, **no_system_kwargs): + def test_list_collection_names(self): db = Database(self.client, "pymongo_test") - db.test.insert_one({"dummy": u"object"}) - db.test.mike.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) - colls = getattr(db, meth)() + colls = db.list_collection_names() self.assertTrue("test" in colls) self.assertTrue("test.mike" in colls) for coll in colls: self.assertTrue("$" not in coll) db.systemcoll.test.insert_one({}) - no_system_collections = getattr(db, meth)(**no_system_kwargs) + no_system_collections = db.list_collection_names( + filter={"name": {"$regex": r"^(?!system\.)"}} + ) for coll in no_system_collections: self.assertTrue(not coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) @@ -182,22 +191,12 @@ def _test_collection_names(self, meth, **no_system_kwargs): db["coll" + str(i)].insert_one({}) # No Error try: - getattr(db, meth)() + db.list_collection_names() finally: self.client.drop_database("many_collections") - def test_collection_names(self): - self._test_collection_names( - 'collection_names', include_system_collections=False) - - def test_list_collection_names(self): - self._test_collection_names( - 'list_collection_names', filter={ - "name": {"$regex": r"^(?!system\.)"}}) - def test_list_collection_names_filter(self): listener = OvertCommandListener() - results = listener.results client = rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] 
db.capped.drop() @@ -205,33 +204,43 @@ def test_list_collection_names_filter(self): db.capped.insert_one({}) db.non_capped.insert_one({}) self.addCleanup(client.drop_database, db.name) - + filter: Union[None, dict] # Should not send nameOnly. - for filter in ({'options.capped': True}, - {'options.capped': True, 'name': 'capped'}): - results.clear() + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): + listener.reset() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) - self.assertNotIn("nameOnly", results["started"][0].command) + self.assertNotIn("nameOnly", listener.started_events[0].command) # Should send nameOnly (except on 2.6). - for filter in (None, {}, {'name': {'$in': ['capped', 'non_capped']}}): - results.clear() + for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): + listener.reset() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) self.assertIn("non_capped", names) - command = results["started"][0].command - if client_context.version >= (3, 0): - self.assertIn("nameOnly", command) - self.assertTrue(command["nameOnly"]) - else: - self.assertNotIn("nameOnly", command) + command = listener.started_events[0].command + self.assertIn("nameOnly", command) + self.assertTrue(command["nameOnly"]) + + def test_check_exists(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + db = client[self.db.name] + db.drop_collection("unique") + db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + db.drop_collection("unique") + db.create_collection("unique", check_exists=False) + self.assertTrue(len(listener.started_events) > 0) + self.assertNotIn("listCollections", listener.started_command_names()) def test_list_collections(self): self.client.drop_database("pymongo_test") db = Database(self.client, "pymongo_test") - db.test.insert_one({"dummy": u"object"}) - db.test.mike.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) results = db.list_collections() colls = [result["name"] for result in results] @@ -245,7 +254,7 @@ def test_list_collections(self): self.assertTrue("$" not in coll) # Duplicate check. - coll_cnt = {} + coll_cnt: dict = {} for coll in colls: try: # Found duplicate. @@ -253,11 +262,13 @@ def test_list_collections(self): self.assertTrue(False) except KeyError: coll_cnt[coll] = 1 - coll_cnt = {} + coll_cnt: dict = {} # Checking if is there any collection which don't exists. - if (len(set(colls) - set(["test","test.mike"])) == 0 or - len(set(colls) - set(["test","test.mike","system.indexes"])) == 0): + if ( + len(set(colls) - {"test", "test.mike"}) == 0 + or len(set(colls) - {"test", "test.mike", "system.indexes"}) == 0 + ): self.assertTrue(True) else: self.assertTrue(False) @@ -271,7 +282,7 @@ def test_list_collections(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) - results = db.list_collections(filter={'options.capped': True}) + results = db.list_collections(filter={"options.capped": True}) colls = [result["name"] for result in results] # Checking only capped collections are present @@ -294,24 +305,22 @@ def test_list_collections(self): coll_cnt = {} # Checking if is there any collection which don't exists. 
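The new check_exists assertions above verify that create_collection can skip its preliminary listCollections round trip. A brief usage sketch, assuming a reachable deployment; the collection name is illustrative:

    from pymongo import MongoClient

    client = MongoClient()  # assumed local deployment
    db = client.pymongo_test
    db.drop_collection("events")
    # check_exists=False skips the client-side listCollections existence check
    # and sends createCollection directly; the server still errors if the
    # collection already exists.
    db.create_collection("events", check_exists=False)
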
- if (len(set(colls) - set(["test"])) == 0 or - len(set(colls) - set(["test","system.indexes"])) == 0): + if len(set(colls) - {"test"}) == 0 or len(set(colls) - {"test", "system.indexes"}) == 0: self.assertTrue(True) else: self.assertTrue(False) self.client.drop_database("pymongo_test") - def test_collection_names_single_socket(self): - # Test that Database.collection_names only requires one socket. + def test_list_collection_names_single_socket(self): client = rs_or_single_client(maxPoolSize=1) - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): db.create_collection(str(i)) db.list_collection_names() # Must not hang. - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") def test_drop_collection(self): db = Database(self.client, "pymongo_test") @@ -319,22 +328,22 @@ def test_drop_collection(self): self.assertRaises(TypeError, db.drop_collection, 5) self.assertRaises(TypeError, db.drop_collection, None) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.drop_collection("test") self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) - db.drop_collection(u"test") + db.drop_collection("test") self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.drop_collection(db.test) self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.test.drop() self.assertFalse("test" in db.list_collection_names()) @@ -342,11 +351,10 @@ def test_drop_collection(self): db.drop_collection(db.test.doesnotexist) - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: - db_wc = Database(self.client, 'pymongo_test', - write_concern=IMPOSSIBLE_WRITE_CONCERN) + if client_context.is_rs: + db_wc = Database(self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN) with self.assertRaises(WriteConcernError): - db_wc.drop_collection('test') + db_wc.drop_collection("test") def test_validate_collection(self): db = self.client.pymongo_test @@ -354,12 +362,10 @@ def test_validate_collection(self): self.assertRaises(TypeError, db.validate_collection, 5) self.assertRaises(TypeError, db.validate_collection, None) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) - self.assertRaises(OperationFailure, db.validate_collection, - "test.doesnotexist") - self.assertRaises(OperationFailure, db.validate_collection, - db.test.doesnotexist) + self.assertRaises(OperationFailure, db.validate_collection, "test.doesnotexist") + self.assertRaises(OperationFailure, db.validate_collection, db.test.doesnotexist) self.assertTrue(db.validate_collection("test")) self.assertTrue(db.validate_collection(db.test)) @@ -368,107 +374,21 @@ def test_validate_collection(self): self.assertTrue(db.validate_collection(db.test, scandata=True, full=True)) self.assertTrue(db.validate_collection(db.test, True, True)) - @client_context.require_no_mongos - def test_profiling_levels(self): - db = 
self.client.pymongo_test - self.assertEqual(db.profiling_level(), OFF) # default - - self.assertRaises(ValueError, db.set_profiling_level, 5.5) - self.assertRaises(ValueError, db.set_profiling_level, None) - self.assertRaises(ValueError, db.set_profiling_level, -1) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, 5.5) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, '1') - - db.set_profiling_level(SLOW_ONLY) - self.assertEqual(db.profiling_level(), SLOW_ONLY) - - db.set_profiling_level(ALL) - self.assertEqual(db.profiling_level(), ALL) - - db.set_profiling_level(OFF) - self.assertEqual(db.profiling_level(), OFF) - - db.set_profiling_level(SLOW_ONLY, 50) - self.assertEqual(50, db.command("profile", -1)['slowms']) - - db.set_profiling_level(ALL, -1) - self.assertEqual(-1, db.command("profile", -1)['slowms']) - - db.set_profiling_level(OFF, 100) # back to default - self.assertEqual(100, db.command("profile", -1)['slowms']) - - @client_context.require_no_mongos - def test_profiling_info(self): + @client_context.require_version_min(4, 3, 3) + def test_validate_collection_background(self): db = self.client.pymongo_test - - db.system.profile.drop() - db.set_profiling_level(ALL) - db.test.find_one() - db.set_profiling_level(OFF) - - info = db.profiling_info() - self.assertTrue(isinstance(info, list)) - - # Check if we're going to fail because of SERVER-4754, in which - # profiling info isn't collected if mongod was started with --auth - if server_started_with_auth(self.client): - raise SkipTest( - "We need SERVER-4754 fixed for the rest of this test to pass" - ) - - self.assertTrue(len(info) >= 1) - # These basically clue us in to server changes. - self.assertTrue(isinstance(info[0]['responseLength'], int)) - self.assertTrue(isinstance(info[0]['millis'], int)) - self.assertTrue(isinstance(info[0]['client'], string_type)) - self.assertTrue(isinstance(info[0]['user'], string_type)) - self.assertTrue(isinstance(info[0]['ns'], string_type)) - self.assertTrue(isinstance(info[0]['op'], string_type)) - self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) - - @client_context.require_no_mongos - @ignore_deprecations - def test_errors(self): - # We must call getlasterror, etc. on same socket as last operation. - db = rs_or_single_client(maxPoolSize=1).pymongo_test - db.reset_error_history() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertEqual(None, db.previous_error()) - - db.test.insert_one({"_id": 1}) - unacked = db.test.with_options(write_concern=WriteConcern(w=0)) - - unacked.insert_one({"_id": 1}) - self.assertTrue(db.error()) - if client_context.supports_getpreverror: - self.assertTrue(db.previous_error()) - - unacked.insert_one({"_id": 1}) - self.assertTrue(db.error()) - - if client_context.supports_getpreverror: - prev_error = db.previous_error() - self.assertEqual(prev_error["nPrev"], 1) - del prev_error["nPrev"] - prev_error.pop("lastOp", None) - error = db.error() - error.pop("lastOp", None) - # getLastError includes "connectionId" in recent - # server versions, getPrevError does not. 
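The test_errors block removed here exercised the legacy getLastError/getPrevError machinery, which no longer exists. A minimal sketch of the modern behavior, assuming a default (acknowledged) write concern: failures now surface as exceptions at the call site.

from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError

client = MongoClient()
db = client.pymongo_test
db.test.insert_one({"_id": 1})
try:
    # Acknowledged writes raise instead of stashing an error for a
    # later getLastError call.
    db.test.insert_one({"_id": 1})
except DuplicateKeyError as exc:
    print(exc.details)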
- error.pop("connectionId", None) - self.assertEqualReply(error, prev_error) - - db.test.find_one() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertTrue(db.previous_error()) - self.assertEqual(db.previous_error()["nPrev"], 2) - - db.reset_error_history() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertEqual(None, db.previous_error()) + db.test.insert_one({"dummy": "object"}) + coll = db.test + self.assertTrue(db.validate_collection(coll, background=False)) + # The inMemory storage engine does not support background=True. + if client_context.storage_engine != "inMemory": + self.assertTrue(db.validate_collection(coll, background=True)) + self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) + # The server does not support background=True with full=True. + # Assert that we actually send the background option by checking + # that this combination fails. + with self.assertRaises(OperationFailure): + db.validate_collection(coll, full=True, background=True) def test_command(self): self.maxDiff = None @@ -480,229 +400,46 @@ def test_command(self): self.assertEqualReply(second, third) # We use 'aggregate' as our example command, since it's an easy way to - # retrieve a BSON regex from a collection using a command. But until - # MongoDB 2.3.2, aggregation turned regexes into strings: SERVER-6470. - # Note: MongoDB 3.5.2 requires the 'cursor' or 'explain' option for - # aggregate. - @client_context.require_version_max(3, 5, 0) + # retrieve a BSON regex from a collection using a command. def test_command_with_regex(self): db = self.client.pymongo_test db.test.drop() - db.test.insert_one({'r': re.compile('.*')}) - db.test.insert_one({'r': Regex('.*')}) + db.test.insert_one({"r": re.compile(".*")}) + db.test.insert_one({"r": Regex(".*")}) + + result = db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertTrue(isinstance(doc["r"], Regex)) + + def test_cursor_command(self): + db = self.client.pymongo_test + db.test.drop() + + docs = [{"_id": i, "doc": i} for i in range(3)] + db.test.insert_many(docs) + + cursor = db.cursor_command("find", "test") + + self.assertIsInstance(cursor, CommandCursor) + + result_docs = list(cursor) + self.assertEqual(docs, result_docs) - result = db.command('aggregate', 'test', pipeline=[]) - for doc in result['result']: - self.assertTrue(isinstance(doc['r'], Regex)) + def test_cursor_command_invalid(self): + self.assertRaises(InvalidOperation, self.db.cursor_command, "usersInfo", "test") def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) self.assertRaises(TypeError, auth._password_digest, None) - self.assertTrue(isinstance(auth._password_digest("mike", "password"), - text_type)) - self.assertEqual(auth._password_digest("mike", "password"), - u"cd7e45b3b2767dc2fa9b6b548457ed00") - self.assertEqual(auth._password_digest("mike", "password"), - auth._password_digest(u"mike", u"password")) - self.assertEqual(auth._password_digest("Gustave", u"Dor\xe9"), - u"81e0e2364499209f466e75926a162d73") - - @client_context.require_auth - def test_authenticate_add_remove_user(self): - # "self.client" is logged in as root. 
- auth_db = self.client.pymongo_test - - def check_auth(username, password): - c = rs_or_single_client_noauth( - username=username, - password=password, - authSource="pymongo_test") - - c.pymongo_test.collection.find_one() - - # Configuration errors - self.assertRaises(ValueError, auth_db.add_user, "user", '') - self.assertRaises(TypeError, auth_db.add_user, "user", 'password', 15) - self.assertRaises(TypeError, auth_db.add_user, - "user", 'password', 'True') - self.assertRaises(ConfigurationError, auth_db.add_user, - "user", 'password', True, roles=['read']) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - self.assertRaises(DeprecationWarning, auth_db.add_user, - "user", "password") - self.assertRaises(DeprecationWarning, auth_db.add_user, - "user", "password", True) - - with ignore_deprecations(): - self.assertRaises(ConfigurationError, auth_db.add_user, - "user", "password", digestPassword=True) - - # Add / authenticate / remove - auth_db.add_user("mike", "password", roles=["read"]) - self.addCleanup(remove_all_users, auth_db) - self.assertRaises(TypeError, check_auth, 5, "password") - self.assertRaises(TypeError, check_auth, "mike", 5) - self.assertRaises(OperationFailure, - check_auth, "mike", "not a real password") - self.assertRaises(OperationFailure, check_auth, "faker", "password") - check_auth("mike", "password") - - if not client_context.version.at_least(3, 7, 2) or HAVE_STRINGPREP: - # Unicode name and password. - check_auth(u"mike", u"password") - - auth_db.remove_user("mike") - self.assertRaises( - OperationFailure, check_auth, "mike", "password") - - # Add / authenticate / change password - self.assertRaises( - OperationFailure, check_auth, "Gustave", u"Dor\xe9") - auth_db.add_user("Gustave", u"Dor\xe9", roles=["read"]) - check_auth("Gustave", u"Dor\xe9") - - # Change password. - auth_db.add_user("Gustave", "password", roles=["read"]) - self.assertRaises( - OperationFailure, check_auth, "Gustave", u"Dor\xe9") - check_auth("Gustave", u"password") - - @client_context.require_auth - @ignore_deprecations - def test_make_user_readonly(self): - # "self.client" is logged in as root. - auth_db = self.client.pymongo_test - - # Make a read-write user. - auth_db.add_user('jesse', 'pw') - self.addCleanup(remove_all_users, auth_db) - - # Check that we're read-write by default. - c = rs_or_single_client_noauth(username='jesse', - password='pw', - authSource='pymongo_test') - - c.pymongo_test.collection.insert_one({}) - - # Make the user read-only. - auth_db.add_user('jesse', 'pw', read_only=True) - - c = rs_or_single_client_noauth(username='jesse', - password='pw', - authSource='pymongo_test') - - self.assertRaises(OperationFailure, - c.pymongo_test.collection.insert_one, - {}) - - @client_context.require_auth - @ignore_deprecations - def test_default_roles(self): - # "self.client" is logged in as root. 
- auth_admin = self.client.admin - auth_admin.add_user('test_default_roles', 'pass') - self.addCleanup(client_context.drop_user, 'admin', 'test_default_roles') - info = auth_admin.command( - 'usersInfo', 'test_default_roles')['users'][0] - - self.assertEqual("root", info['roles'][0]['role']) - - # Read only "admin" user - auth_admin.add_user('ro-admin', 'pass', read_only=True) - self.addCleanup(client_context.drop_user, 'admin', 'ro-admin') - info = auth_admin.command('usersInfo', 'ro-admin')['users'][0] - self.assertEqual("readAnyDatabase", info['roles'][0]['role']) - - # "Non-admin" user - auth_db = self.client.pymongo_test - auth_db.add_user('user', 'pass') - self.addCleanup(remove_all_users, auth_db) - info = auth_db.command('usersInfo', 'user')['users'][0] - self.assertEqual("dbOwner", info['roles'][0]['role']) - - # Read only "Non-admin" user - auth_db.add_user('ro-user', 'pass', read_only=True) - info = auth_db.command('usersInfo', 'ro-user')['users'][0] - self.assertEqual("read", info['roles'][0]['role']) - - @client_context.require_auth - @ignore_deprecations - def test_new_user_cmds(self): - # "self.client" is logged in as root. - auth_db = self.client.pymongo_test - auth_db.add_user("amalia", "password", roles=["userAdmin"]) - self.addCleanup(client_context.drop_user, "pymongo_test", "amalia") - - db = rs_or_single_client_noauth(username="amalia", - password="password", - authSource="pymongo_test").pymongo_test - - # This tests the ability to update user attributes. - db.add_user("amalia", "new_password", - customData={"secret": "koalas"}) - - user_info = db.command("usersInfo", "amalia") - self.assertTrue(user_info["users"]) - amalia_user = user_info["users"][0] - self.assertEqual(amalia_user["user"], "amalia") - self.assertEqual(amalia_user["customData"], {"secret": "koalas"}) - - @client_context.require_auth - @ignore_deprecations - def test_authenticate_multiple(self): - # "self.client" is logged in as root. - self.client.drop_database("pymongo_test") - self.client.drop_database("pymongo_test1") - admin_db_auth = self.client.admin - users_db_auth = self.client.pymongo_test - - # Non-root client. - client = rs_or_single_client_noauth() - admin_db = client.admin - users_db = client.pymongo_test - other_db = client.pymongo_test1 - - self.assertRaises(OperationFailure, users_db.test.find_one) - - admin_db_auth.add_user( - 'ro-admin', - 'pass', - roles=["userAdmin", "readAnyDatabase"]) - - self.addCleanup(client_context.drop_user, 'admin', 'ro-admin') - users_db_auth.add_user( - 'user', 'pass', roles=["userAdmin", "readWrite"]) - self.addCleanup(remove_all_users, users_db_auth) - - # Regular user should be able to query its own db, but - # no other. - users_db.authenticate('user', 'pass') - self.assertEqual(0, users_db.test.count_documents({})) - self.assertRaises(OperationFailure, other_db.test.find_one) - - # Admin read-only user should be able to query any db, - # but not write. - admin_db.authenticate('ro-admin', 'pass') - self.assertEqual(None, other_db.test.find_one()) - self.assertRaises(OperationFailure, - other_db.test.insert_one, {}) - - # Close all sockets. - client.close() - - # We should still be able to write to the regular user's db. - self.assertTrue(users_db.test.delete_many({})) - - # And read from other dbs... - self.assertEqual(0, other_db.test.count_documents({})) - - # But still not write to other dbs. 
- self.assertRaises(OperationFailure, - other_db.test.insert_one, {}) + self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertEqual( + auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" + ) + self.assertEqual( + auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73" + ) def test_id_ordering(self): # PyMongo attempts to have _id show up first @@ -713,14 +450,14 @@ def test_id_ordering(self): # with hash randomization enabled (e.g. tox). db = self.client.pymongo_test db.test.drop() - db.test.insert_one(SON([("hello", "world"), - ("_id", 5)])) + db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) cursor = db.test.find() for x in cursor: - for (k, v) in x.items(): + for (k, _v) in x.items(): self.assertEqual(k, "_id") break @@ -736,10 +473,8 @@ def test_deref(self): obj = {"x": True} key = db.test.insert_one(obj).inserted_id self.assertEqual(obj, db.dereference(DBRef("test", key))) - self.assertEqual(obj, - db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, - db.dereference, DBRef("test", key, "foo")) + self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) + self.assertRaises(ValueError, db.dereference, DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} @@ -752,57 +487,30 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) - self.assertEqual(SON([("foo", "bar")]), - db.dereference(DBRef("test", 4), - projection={"_id": False})) - - @client_context.require_no_auth - @client_context.require_version_max(4, 1, 0) - def test_eval(self): - db = self.client.pymongo_test - db.test.drop() - - with ignore_deprecations(): - self.assertRaises(TypeError, db.eval, None) - self.assertRaises(TypeError, db.eval, 5) - self.assertRaises(TypeError, db.eval, []) - - self.assertEqual(3, db.eval("function (x) {return x;}", 3)) - self.assertEqual(3, db.eval(u"function (x) {return x;}", 3)) - - self.assertEqual(None, - db.eval("function (x) {db.test.save({y:x});}", 5)) - self.assertEqual(db.test.find_one()["y"], 5) - - self.assertEqual(5, db.eval("function (x, y) {return x + y;}", 2, 3)) - self.assertEqual(5, db.eval("function () {return 5;}")) - self.assertEqual(5, db.eval("2 + 3;")) - - self.assertEqual(5, db.eval(Code("2 + 3;"))) - self.assertRaises(OperationFailure, db.eval, Code("return i;")) - self.assertEqual(2, db.eval(Code("return i;", {"i": 2}))) - self.assertEqual(5, db.eval(Code("i + 3;", {"i": 2}))) - - self.assertRaises(OperationFailure, db.eval, "5 ++ 5;") + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + self.assertEqual( + SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) + ) # TODO some of these tests belong in the collection level testing. 
def test_insert_find_one(self): db = self.client.pymongo_test db.test.drop() - a_doc = SON({"hello": u"world"}) + a_doc = SON({"hello": "world"}) a_key = db.test.insert_one(a_doc).inserted_id self.assertTrue(isinstance(a_doc["_id"], ObjectId)) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, db.test.find_one(a_key)) self.assertEqual(None, db.test.find_one(ObjectId())) - self.assertEqual(a_doc, db.test.find_one({"hello": u"world"})) - self.assertEqual(None, db.test.find_one({"hello": u"test"})) + self.assertEqual(a_doc, db.test.find_one({"hello": "world"})) + self.assertEqual(None, db.test.find_one({"hello": "test"})) b = db.test.find_one() - b["hello"] = u"mike" + assert b is not None + b["hello"] = "mike" db.test.replace_one({"_id": b["_id"]}, b) self.assertNotEqual(a_doc, db.test.find_one(a_key)) @@ -817,13 +525,13 @@ def test_insert_find_one(self): def test_long(self): db = self.client.pymongo_test db.test.drop() - db.test.insert_one({"x": long(9223372036854775807)}) - retrieved = db.test.find_one()['x'] + db.test.insert_one({"x": 9223372036854775807}) + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()['x'] + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -845,8 +553,8 @@ def test_delete(self): length += 1 self.assertEqual(length, 2) - db.test.delete_one(db.test.find_one()) - db.test.delete_one(db.test.find_one()) + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] self.assertEqual(db.test.find_one(), None) db.test.insert_one({"x": 1}) @@ -861,122 +569,74 @@ def test_delete(self): db.test.delete_many({}) self.assertFalse(db.test.find_one()) - @client_context.require_no_auth - @client_context.require_version_max(4, 1, 0) - def test_system_js(self): - db = self.client.pymongo_test - db.system.js.delete_many({}) - - self.assertEqual(0, db.system.js.count_documents({})) - db.system_js.add = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count_documents({})) - self.assertEqual(6, db.system_js.add(1, 5)) - del db.system_js.add - self.assertEqual(0, db.system.js.count_documents({})) - - db.system_js['add'] = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count_documents({})) - self.assertEqual(6, db.system_js['add'](1, 5)) - del db.system_js['add'] - self.assertEqual(0, db.system.js.count_documents({})) - self.assertRaises(OperationFailure, db.system_js.add, 1, 5) - - # TODO right now CodeWScope doesn't work w/ system js - # db.system_js.scope = Code("return hello;", {"hello": 8}) - # self.assertEqual(8, db.system_js.scope()) - - self.assertRaises(OperationFailure, db.system_js.non_existant) - - def test_system_js_list(self): - db = self.client.pymongo_test - db.system.js.delete_many({}) - self.assertEqual([], db.system_js.list()) - - db.system_js.foo = "function() { return 'blah'; }" - self.assertEqual(["foo"], db.system_js.list()) - - db.system_js.bar = "function() { return 'baz'; }" - self.assertEqual(set(["foo", "bar"]), set(db.system_js.list())) - - del db.system_js.foo - self.assertEqual(["bar"], 
db.system_js.list()) - def test_command_response_without_ok(self): # Sometimes (SERVER-10891) the server's response to a badly-formatted # command document will have no 'ok' field. We should raise # OperationFailure instead of KeyError. - self.assertRaises(OperationFailure, - helpers._check_command_response, {}) + self.assertRaises(OperationFailure, helpers._check_command_response, {}, None) try: - helpers._check_command_response({'$err': 'foo'}) + helpers._check_command_response({"$err": "foo"}, None) except OperationFailure as e: - self.assertEqual(e.args[0], 'foo') + self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") else: self.fail("_check_command_response didn't raise OperationFailure") def test_mongos_response(self): error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0, 'errmsg': 'inner'}}} + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document) + helpers._check_command_response(error_document, None) - self.assertEqual('inner', str(context.exception)) + self.assertIn("inner", str(context.exception)) # If a shard has no primary and you run a command like dbstats, which # cannot be run on a secondary, mongos's response includes empty "raw" # errors. See SERVER-15428. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document) + helpers._check_command_response(error_document, None) - self.assertEqual('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) # Raw error has ok: 0 but no errmsg. Not a known case, but test it. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document) + helpers._check_command_response(error_document, None) - self.assertEqual('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) @client_context.require_test_commands @client_context.require_no_mongos def test_command_max_time_ms(self): - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: db = self.client.pymongo_test - db.command('count', 'test') - self.assertRaises(ExecutionTimeout, db.command, - 'count', 'test', maxTimeMS=1) - pipeline = [{'$project': {'name': 1, 'count': 1}}] + db.command("count", "test") + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. - db.command('aggregate', 'test', pipeline=pipeline, cursor={}) - self.assertRaises(ExecutionTimeout, db.command, - 'aggregate', 'test', - pipeline=pipeline, cursor={}, maxTimeMS=1) + db.command("aggregate", "test", pipeline=pipeline, cursor={}) + self.assertRaises( + ExecutionTimeout, + db.command, + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) # Collection helper. 
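The fail point above forces maxTimeAlwaysTimeOut; in normal use, maxTimeMS simply bounds server-side execution time and an overrun raises ExecutionTimeout. A sketch:

from pymongo import MongoClient
from pymongo.errors import ExecutionTimeout

db = MongoClient().pymongo_test
try:
    db.command("aggregate", "test", pipeline=[], cursor={}, maxTimeMS=1)
except ExecutionTimeout:
    # The server aborted the command once the 1ms budget expired.
    pass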
db.test.aggregate(pipeline=pipeline) - self.assertRaises(ExecutionTimeout, - db.test.aggregate, pipeline, maxTimeMS=1) + self.assertRaises(ExecutionTimeout, db.test.aggregate, pipeline, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_with_options(self): codec_options = DECIMAL_CODECOPTS @@ -985,13 +645,22 @@ def test_with_options(self): read_concern = ReadConcern(level="majority") # List of all options to compare. - allopts = ['name', 'client', 'codec_options', - 'read_preference', 'write_concern', 'read_concern'] + allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] db1 = self.client.get_database( - 'with_options_test', codec_options=codec_options, - read_preference=read_preference, write_concern=write_concern, - read_concern=read_concern) + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) # Case 1: swap no options db2 = db1.with_options() @@ -999,44 +668,37 @@ def test_with_options(self): self.assertEqual(getattr(db1, opt), getattr(db2, opt)) # Case 2: swap all options - newopts = {'codec_options': CodecOptions(), - 'read_preference': ReadPreference.PRIMARY, - 'write_concern': WriteConcern(w=1), - 'read_concern': ReadConcern(level="local")} - db2 = db1.with_options(**newopts) + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } + db2 = db1.with_options(**newopts) # type: ignore[arg-type] for opt in newopts: - self.assertEqual( - getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) - - def test_current_op_codec_options(self): - class MySON(SON): - pass - opts = CodecOptions(document_class=MySON) - db = self.client.get_database("pymongo_test", codec_options=opts) - current_op = db.current_op(True) - self.assertTrue(current_op['inprog']) - self.assertIsInstance(current_op, MySON) + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) class TestDatabaseAggregation(IntegrationTest): def setUp(self): - self.pipeline = [{"$listLocalSessions": {}}, - {"$limit": 1}, - {"$addFields": {"dummy": "dummy field"}}, - {"$project": {"_id": 0, "dummy": 1}}] + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] self.result = {"dummy": "dummy field"} self.admin = self.client.admin - @client_context.require_version_min(3, 6, 0) def test_database_aggregation(self): with self.admin.aggregate(self.pipeline) as cursor: result = next(cursor) self.assertEqual(result, self.result) - @client_context.require_version_min(3, 6, 0) @client_context.require_no_mongos def test_database_aggregation_fake_cursor(self): coll_name = "test_output" + write_stage: dict if client_context.version < (4, 3): db_name = "admin" write_stage = {"$out": coll_name} @@ -1044,8 +706,7 @@ def test_database_aggregation_fake_cursor(self): # SERVER-43287 disallows writing with $out to the admin db, use # $merge instead. 
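On 4.4+ servers the branch below ends the admin-database pipeline with $merge into another database, so the cursor yields no documents. A hedged sketch of the shape under test:

from pymongo import MongoClient

client = MongoClient()
pipeline = [
    {"$listLocalSessions": {}},
    {"$limit": 1},
    {"$addFields": {"dummy": "dummy field"}},
    {"$project": {"_id": 0, "dummy": 1}},
    {"$merge": {"into": {"db": "pymongo_test", "coll": "test_output"}}},
]
with client.admin.aggregate(pipeline) as cursor:
    assert list(cursor) == []  # $merge writes out; it returns nothing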
db_name = "pymongo_test" - write_stage = { - "$merge": {"into": {"db": db_name, "coll": coll_name}}} + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} output_coll = self.client[db_name][coll_name] output_coll.drop() self.addCleanup(output_coll.drop) @@ -1060,12 +721,9 @@ def test_database_aggregation_fake_cursor(self): result = wait_until(output_coll.find_one, "read unacknowledged write") self.assertEqual(result["dummy"], self.result["dummy"]) - @client_context.require_version_max(3, 6, 0, -1) - def test_database_aggregation_unsupported(self): - err_msg = r"Database.aggregate\(\) is only supported on MongoDB 3.6\+." - with self.assertRaisesRegex(ConfigurationError, err_msg): - with self.admin.aggregate(self.pipeline) as _: - pass + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Database(self.client, "test")) if __name__ == "__main__": diff --git a/test/test_dbref.py b/test/test_dbref.py index 4996a2f00e..d170f43f56 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -13,16 +13,20 @@ # limitations under the License. """Tests for the dbref module.""" +from __future__ import annotations import pickle import sys +from typing import Any + sys.path[0:0] = [""] -from bson.dbref import DBRef -from bson.objectid import ObjectId +from copy import deepcopy from test import unittest -from copy import deepcopy +from bson import decode, encode +from bson.dbref import DBRef +from bson.objectid import ObjectId class TestDBRef(unittest.TestCase): @@ -36,18 +40,17 @@ def test_creation(self): self.assertRaises(TypeError, DBRef, None, a) self.assertRaises(TypeError, DBRef, "coll", a, 5) self.assertTrue(DBRef("coll", a)) - self.assertTrue(DBRef(u"coll", a)) - self.assertTrue(DBRef(u"coll", 5)) - self.assertTrue(DBRef(u"coll", 5, "database")) + self.assertTrue(DBRef("coll", 5)) + self.assertTrue(DBRef("coll", 5, "database")) def test_read_only(self): a = DBRef("coll", ObjectId()) def foo(): - a.collection = "blah" + a.collection = "blah" # type: ignore[misc] def bar(): - a.id = "aoeu" + a.id = "aoeu" # type: ignore[misc] self.assertEqual("coll", a.collection) a.id @@ -56,55 +59,45 @@ def bar(): self.assertRaises(AttributeError, bar) def test_repr(self): - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - self.assertEqual(repr(DBRef(u"coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" - % (repr(u'coll'),) - ) - self.assertEqual(repr(DBRef("coll", 5, foo="bar")), - "DBRef('coll', 5, foo='bar')") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " - "'foo')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef('coll', ObjectId('1234567890abcdef12345678'))", + ) + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef({}, ObjectId('1234567890abcdef12345678'))".format(repr("coll")), + ) + self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), + "DBRef('coll', ObjectId('1234567890abcdef12345678'), 'foo')", + ) def test_equality(self): obj_id = ObjectId("1234567890abcdef12345678") - self.assertEqual(DBRef('foo', 5), DBRef('foo', 5)) - self.assertEqual(DBRef("coll", obj_id), DBRef(u"coll", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - 
DBRef(u"coll", obj_id, "foo")) + self.assertEqual(DBRef("foo", 5), DBRef("foo", 5)) + self.assertEqual(DBRef("coll", obj_id), DBRef("coll", obj_id)) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id), DBRef("col", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", ObjectId(b"123456789011"))) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", ObjectId(b"123456789011"))) self.assertNotEqual(DBRef("coll", obj_id), 4) - self.assertEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "foo")) - self.assertNotEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "bar")) + self.assertNotEqual(DBRef("coll", obj_id, "foo"), DBRef("coll", obj_id, "bar")) # Explicitly test inequality - self.assertFalse(DBRef('foo', 5) != DBRef('foo', 5)) - self.assertFalse(DBRef("coll", obj_id) != DBRef(u"coll", obj_id)) - self.assertFalse(DBRef("coll", obj_id, "foo") != - DBRef(u"coll", obj_id, "foo")) + self.assertFalse(DBRef("foo", 5) != DBRef("foo", 5)) + self.assertFalse(DBRef("coll", obj_id) != DBRef("coll", obj_id)) + self.assertFalse(DBRef("coll", obj_id, "foo") != DBRef("coll", obj_id, "foo")) def test_kwargs(self): - self.assertEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="bar")) + self.assertEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="bar")) self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5)) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="baz")) + self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="baz")) self.assertEqual("bar", DBRef("coll", 5, foo="bar").foo) - self.assertRaises(AttributeError, getattr, - DBRef("coll", 5, foo="bar"), "bar") + self.assertRaises(AttributeError, getattr, DBRef("coll", 5, foo="bar"), "bar") def test_deepcopy(self): - a = DBRef('coll', 'asdf', 'db', x=[1]) + a = DBRef("coll", "asdf", "db", x=[1]) b = deepcopy(a) self.assertEqual(a, b) @@ -117,22 +110,127 @@ def test_deepcopy(self): self.assertEqual(b.x, [2]) def test_pickling(self): - dbr = DBRef('coll', 5, foo='bar') + dbr = DBRef("coll", 5, foo="bar") for protocol in [0, 1, 2, -1]: pkl = pickle.dumps(dbr, protocol=protocol) dbr2 = pickle.loads(pkl) self.assertEqual(dbr, dbr2) def test_dbref_hash(self): - dbref_1a = DBRef('collection', 'id', 'database') - dbref_1b = DBRef('collection', 'id', 'database') + dbref_1a = DBRef("collection", "id", "database") + dbref_1b = DBRef("collection", "id", "database") self.assertEqual(hash(dbref_1a), hash(dbref_1b)) - dbref_2a = DBRef('collection', 'id', 'database', custom='custom') - dbref_2b = DBRef('collection', 'id', 'database', custom='custom') + dbref_2a = DBRef("collection", "id", "database", custom="custom") + dbref_2b = DBRef("collection", "id", "database", custom="custom") self.assertEqual(hash(dbref_2a), hash(dbref_2b)) self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) + +# https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan +class TestDBRefSpec(unittest.TestCase): + def test_decoding_1_2_3(self): + doc: Any + for doc in [ + # 1, Valid documents MUST be decoded to a DBRef: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Valid documents with extra fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 
2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + # 3, Valid documents with out of order fields: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + self.assertEqual(dbref.collection, doc["$ref"]) + self.assertEqual(dbref.id, doc["$id"]) + self.assertEqual(dbref.database, doc.get("$db")) + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_decoding_4_5(self): + for doc in [ + # 4, Documents missing required fields MUST NOT be decoded to a + # DBRef: + {"$ref": "coll0"}, + {"$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$db": "db0"}, + # 5, Documents with invalid types for $ref or $db MUST NOT be + # decoded to a DBRef + {"$ref": True, "$id": 1}, + {"$ref": "coll0", "$id": 1, "$db": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, dict) + + def test_encoding_1_2(self): + doc: Any + for doc in [ + # 1, Encoding DBRefs with basic fields: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Encoding DBRefs with extra, optional fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({"dbref": doc}) + decoded = decode(encoded_doc) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + self.assertEqual(encoded_dbref, encoded_doc) + # Ensure extra fields are present. + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_encoding_3(self): + for doc in [ + # 3, Encoding DBRefs re-orders any out of order fields during + # decoding: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({"dbref": doc}) + decoded = decode(encoded_doc) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + # BSON does not match because DBRef fields are reordered. + self.assertNotEqual(encoded_dbref, encoded_doc) + self.assertEqual(decode(encoded_dbref), decode(encoded_doc)) + # Ensure extra fields are present. 
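The encoding cases above hinge on a byte-identical round trip: a document whose $ref/$id/$db fields are already in canonical order decodes to a DBRef and re-encodes to the same bytes. A self-contained sketch of that check:

from bson import decode, encode
from bson.dbref import DBRef
from bson.objectid import ObjectId

raw = encode({"dbref": {"$ref": "coll0", "$id": ObjectId(), "$db": "db0"}})
doc = decode(raw)
assert isinstance(doc["dbref"], DBRef)
# DBRef re-encodes as $ref, $id, $db (plus extras), so the bytes match.
assert encode(doc) == raw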
+ for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_decimal128.py b/test/test_decimal128.py index e72c1273e4..46819dd587 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -13,54 +13,43 @@ # limitations under the License. """Tests for Decimal128.""" +from __future__ import annotations -import codecs -import glob -import json -import os.path import pickle import sys - -from binascii import unhexlify -from decimal import Decimal, DecimalException +from decimal import Decimal sys.path[0:0] = [""] -from bson import BSON -from bson.decimal128 import Decimal128, create_decimal128_context -from bson.json_util import dumps, loads -from bson.py3compat import b from test import client_context, unittest -class TestDecimal128(unittest.TestCase): +from bson.decimal128 import Decimal128, create_decimal128_context - def test_round_trip(self): - if not client_context.version.at_least(3, 3, 6): - raise unittest.SkipTest( - 'Round trip test requires MongoDB >= 3.3.6') +class TestDecimal128(unittest.TestCase): + @client_context.require_connection + def test_round_trip(self): coll = client_context.client.pymongo_test.test coll.drop() - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') - coll.insert_one({'dec128': dec128}) - doc = coll.find_one({'dec128': dec128}) + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") + coll.insert_one({"dec128": dec128}) + doc = coll.find_one({"dec128": dec128}) + assert doc is not None self.assertIsNotNone(doc) - self.assertEqual(doc['dec128'], dec128) + self.assertEqual(doc["dec128"], dec128) def test_pickle(self): - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") for protocol in range(pickle.HIGHEST_PROTOCOL + 1): pkl = pickle.dumps(dec128, protocol=protocol) self.assertEqual(dec128, pickle.loads(pkl)) def test_special(self): - dnan = Decimal('NaN') - dnnan = Decimal('-NaN') - dsnan = Decimal('sNaN') - dnsnan = Decimal('-sNaN') + dnan = Decimal("NaN") + dnnan = Decimal("-NaN") + dsnan = Decimal("sNaN") + dnsnan = Decimal("-sNaN") dnan128 = Decimal128(dnan) dnnan128 = Decimal128(dnnan) dsnan128 = Decimal128(dsnan) @@ -80,5 +69,5 @@ def test_decimal128_context(self): self.assertEqual("0E-6176", str(ctx.copy().create_decimal("1E-6177"))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_default_exports.py b/test/test_default_exports.py new file mode 100644 index 0000000000..4b02e0e318 --- /dev/null +++ b/test/test_default_exports.py @@ -0,0 +1,72 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
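The new test_default_exports.py that follows audits each package's __all__ in both directions. The core check, distilled into a standalone sketch (not the test itself):

import inspect

def audit(mod, ignores=()):
    names = set(dir(mod))
    # Names exported but missing, and public names left unexported.
    missing = [n for n in mod.__all__ if n not in names and n not in ignores]
    stray = [
        n
        for n in names - set(mod.__all__)
        if not n.startswith("_")
        and n not in ignores
        and not inspect.ismodule(getattr(mod, n))
    ]
    return missing, stray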
+ +"""Test the default exports of the top level packages.""" +from __future__ import annotations + +import inspect +import unittest + +import bson +import gridfs +import pymongo + +BSON_IGNORE = [] +GRIDFS_IGNORE = [ + "ASCENDING", + "DESCENDING", + "ClientSession", + "Collection", + "ObjectId", + "validate_string", + "Database", + "ConfigurationError", + "WriteConcern", +] +PYMONGO_IGNORE = [] +GLOBAL_INGORE = ["TYPE_CHECKING", "annotations"] + + +class TestDefaultExports(unittest.TestCase): + def check_module(self, mod, ignores): + names = dir(mod) + names.remove("__all__") + for name in mod.__all__: + if name not in names and name not in ignores: + self.fail(f"{name} was included in {mod}.__all__ but is not a valid symbol") + + for name in names: + if name not in mod.__all__ and name not in ignores: + if name in GLOBAL_INGORE: + continue + value = getattr(mod, name) + if inspect.ismodule(value): + continue + if getattr(value, "__module__", None) == "typing": + continue + if not name.startswith("_"): + self.fail(f"{name} was not included in {mod}.__all__") + + def test_pymongo(self): + self.check_module(pymongo, PYMONGO_IGNORE) + + def test_gridfs(self): + self.check_module(gridfs, GRIDFS_IGNORE) + + def test_bson(self): + self.check_module(bson, BSON_IGNORE) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 05bef0de29..7053f20e1b 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -13,6 +13,7 @@ # limitations under the License. """Test the topology module.""" +from __future__ import annotations import os import sys @@ -20,68 +21,107 @@ sys.path[0:0] = [""] -from bson import json_util, Timestamp -from pymongo import common -from pymongo.errors import ConfigurationError -from pymongo.topology import Topology -from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.ismaster import IsMaster -from pymongo.server_description import ServerDescription, SERVER_TYPE +from test import IntegrationTest, unittest +from test.pymongo_mocks import DummyMonitor +from test.unified_format import generate_test_classes +from test.utils import ( + CMAPListener, + HeartbeatEventListener, + assertion_context, + client_context, + get_pool, + rs_or_single_client, + server_name_to_type, + single_client, + wait_until, +) +from unittest.mock import patch + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers import _check_command_response, _check_write_command_response +from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.settings import TopologySettings +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.uri_parser import parse_uri -from test import unittest -from test.utils import MockPool - # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring') - - -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - self._server_description = server_description - self._topology = topology - - def open(self): - pass +SDAM_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") - def close(self): - pass - def join(self): - pass - - def request_check(self): - pass - - -def create_mock_topology(uri, monitor_class=MockMonitor): - # Some tests in the spec include URIs like mongodb://A/?connect=direct, - # but PyMongo considers any single-seed URI with no setName to be "direct". - parsed_uri = parse_uri(uri.replace('connect=direct', '')) +def create_mock_topology(uri, monitor_class=DummyMonitor): + parsed_uri = parse_uri(uri) replica_set_name = None - if 'replicaset' in parsed_uri['options']: - replica_set_name = parsed_uri['options']['replicaset'] + direct_connection = None + load_balanced = None + if "replicaset" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaset"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] topology_settings = TopologySettings( - parsed_uri['nodelist'], + parsed_uri["nodelist"], replica_set_name=replica_set_name, - pool_class=MockPool, - monitor_class=monitor_class) + monitor_class=monitor_class, + direct_connection=direct_connection, + load_balanced=load_balanced, + ) c = Topology(topology_settings) c.open() return c -def got_ismaster(topology, server_address, ismaster_response): - server_description = ServerDescription( - server_address, IsMaster(ismaster_response), 0) - +def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) +def got_app_error(topology, app_error): + server_address = common.partition_node(app_error["address"]) + server = topology.get_server_by_address(server_address) + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/Connection. 
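got_hello above feeds canned hello responses straight into a Topology with no real server. A sketch using those helpers (field values chosen for illustration):

t = create_mock_topology("mongodb://a/?replicaSet=rs")
got_hello(
    t,
    ("a", 27017),
    {
        "ok": 1,
        "isWritablePrimary": True,  # the legacy "ismaster" field also works
        "setName": "rs",
        "hosts": ["a:27017", "b:27017"],
        "minWireVersion": 0,
        "maxWireVersion": 6,
    },
)
assert get_type(t, "a") == SERVER_TYPE.RSPrimary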
+ try: + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") + else: + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: + if when == "beforeHandshakeCompletes": + completed_handshake = False + elif when == "afterHandshakeCompletes": + completed_handshake = True + else: + raise AssertionError(f"Unknown when field {when}") + + topology.handle_error( + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) + + def get_type(topology, hostname): description = topology.get_server_by_address((hostname, 27017)).description return description.server_type @@ -100,14 +140,12 @@ def server_type_name(server_type): def check_outcome(self, topology, outcome): - expected_servers = outcome['servers'] + expected_servers = outcome["servers"] # Check weak equality before proceeding. - self.assertEqual( - len(topology.description.server_descriptions()), - len(expected_servers)) + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) - if outcome.get('compatible') is False: + if outcome.get("compatible") is False: with self.assertRaises(ConfigurationError): topology.description.check_compatible() else: @@ -121,67 +159,78 @@ def check_outcome(self, topology, outcome): self.assertTrue(topology.has_server(node)) actual_server = topology.get_server_by_address(node) actual_server_description = actual_server.description - - if expected_server['type'] == 'PossiblePrimary': - # Special case, some tests in the spec include the PossiblePrimary - # type, but only single-threaded drivers need that type. We call - # possible primaries Unknown. 
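Continuing the sketch above, routing an application error through got_app_error marks the server Unknown, mirroring what handle_error does for real network failures:

app_error = {
    "address": "a:27017",
    "type": "network",
    "when": "afterHandshakeCompletes",
    "maxWireVersion": 6,
}
got_app_error(t, app_error)
assert get_type(t, "a") == SERVER_TYPE.Unknown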
- expected_server_type = SERVER_TYPE.Unknown - else: - expected_server_type = getattr( - SERVER_TYPE, expected_server['type']) + expected_server_type = server_name_to_type(expected_server["type"]) self.assertEqual( server_type_name(expected_server_type), - server_type_name(actual_server_description.server_type)) + server_type_name(actual_server_description.server_type), + ) - self.assertEqual( - expected_server.get('setName'), - actual_server_description.replica_set_name) + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) - self.assertEqual( - expected_server.get('setVersion'), - actual_server_description.set_version) + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) + + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) self.assertEqual( - expected_server.get('electionId'), - actual_server_description.election_id) + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) - self.assertEqual(outcome['setName'], topology.description.replica_set_name) - self.assertEqual(outcome['logicalSessionTimeoutMinutes'], - topology.description.logical_session_timeout_minutes) - expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType']) - self.assertEqual(topology_type_name(expected_topology_type), - topology_type_name(topology.description.topology_type)) + expected_pool = expected_server.get("pool") + if expected_pool: + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) + + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) + + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) + + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) def create_test(scenario_def): def run_scenario(self): - c = create_mock_topology(scenario_def['uri']) + c = create_mock_topology(scenario_def["uri"]) - for phase in scenario_def['phases']: - for response in phase['responses']: - got_ismaster(c, - common.partition_node(response[0]), - response[1]) + for i, phase in enumerate(scenario_def["phases"]): + # Including the phase description makes failures easier to debug. + description = phase.get("description", str(i)) + with assertion_context(f"phase: {description}"): + for response in phase.get("responses", []): + got_hello(c, common.partition_node(response[0]), response[1]) - check_outcome(self, c, phase['outcome']) + for app_error in phase.get("applicationErrors", []): + got_app_error(c, app_error) + + check_outcome(self, c, phase["outcome"]) return run_scenario def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(SDAM_PATH): dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -192,17 +241,16 @@ def create_tests(): class TestClusterTimeComparison(unittest.TestCase): def test_cluster_time_comparison(self): - t = create_mock_topology('mongodb://host') + t = create_mock_topology("mongodb://host") def send_cluster_time(time, inc, should_update): old = t.max_cluster_time() - new = {'clusterTime': Timestamp(time, inc)} - got_ismaster(t, - ('host', 27017), - {'ok': 1, - 'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': new}) + new = {"clusterTime": Timestamp(time, inc)} + got_hello( + t, + ("host", 27017), + {"ok": 1, "minWireVersion": 0, "maxWireVersion": 6, "$clusterTime": new}, + ) actual = t.max_cluster_time() if should_update: @@ -217,5 +265,140 @@ def send_cluster_time(time, inc, should_update): send_cluster_time(2, 3, True) +class TestIgnoreStaleErrors(IntegrationTest): + def test_ignore_stale_connection_errors(self): + N_THREADS = 5 + barrier = threading.Barrier(N_THREADS, timeout=30) + client = rs_or_single_client(minPoolSize=N_THREADS) + self.addCleanup(client.close) + + # Wait for initial discovery. + client.admin.command("ping") + pool = get_pool(client) + starting_generation = pool.gen.get_overall() + wait_until(lambda: len(pool.conns) == N_THREADS, "created conns") + + def mock_command(*args, **kwargs): + # Synchronize all threads to ensure they use the same generation. + barrier.wait() + raise AutoReconnect("mock Connection.command error") + + for sock in pool.conns: + sock.command = mock_command + + def insert_command(i): + try: + client.test.command("insert", "test", documents=[{"i": i}]) + except AutoReconnect: + pass + + threads = [] + for i in range(N_THREADS): + threads.append(threading.Thread(target=insert_command, args=(i,))) + for t in threads: + t.start() + for t in threads: + t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) + + # Server should be selectable. + client.admin.command("ping") + + +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(IntegrationTest): + @client_context.require_failCommand_appName + def test_pool_unpause(self): + # This test implements the prose test "Connection Pool Management" + listener = CMAPHeartbeatListener() + client = single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) + self.addCleanup(client.close) + # Assert that ConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. 
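The fail_hello document below is a failCommand fail point delivered through the test's fail_point() helper; configured by hand it looks like this (a sketch, assuming a server started with test commands enabled):

from pymongo import MongoClient

client = MongoClient()
fail_hello = {
    "configureFailPoint": "failCommand",
    "mode": {"times": 2},
    "data": {
        "failCommands": ["ismaster", "hello"],
        "errorCode": 1234,
        "appName": "SDAMPoolManagementTest",
    },
}
client.admin.command(fail_hello)
# ... exercise the client, then disable the fail point ...
client.admin.command("configureFailPoint", "failCommand", mode="off")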
+ listener.wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) + + listener.reset() + fail_hello = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", + }, + } + with self.fail_point(fail_hello): + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + listener.wait_for_event(monitoring.PoolClearedEvent, 1) + listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) + listener.wait_for_event(monitoring.PoolReadyEvent, 1) + + +class TestServerMonitoringMode(IntegrationTest): + @client_context.require_no_serverless + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + + def test_rtt_connection_is_enabled_stream(self): + client = rs_or_single_client(serverMonitoringMode="stream") + self.addCleanup(client.close) + client.admin.command("ping") + + def predicate(): + for _, server in client._topology._servers.items(): + monitor = server._monitor + if not monitor._stream: + return False + if client_context.version >= (4, 4): + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._thread is not None: + return False + return True + + wait_until(predicate, "find all RTT monitors") + + def test_rtt_connection_is_disabled_poll(self): + client = rs_or_single_client(serverMonitoringMode="poll") + self.addCleanup(client.close) + self.assert_rtt_connection_is_disabled(client) + + def test_rtt_connection_is_disabled_auto(self): + envs = [ + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9"}, + {"FUNCTIONS_WORKER_RUNTIME": "python"}, + {"K_SERVICE": "gcpservicename"}, + {"FUNCTION_NAME": "gcpfunctionname"}, + {"VERCEL": "1"}, + ] + for env in envs: + with patch.dict("os.environ", env): + client = rs_or_single_client(serverMonitoringMode="auto") + self.addCleanup(client.close) + self.assert_rtt_connection_is_disabled(client) + + def assert_rtt_connection_is_disabled(self, client): + client.admin.command("ping") + for _, server in client._topology._servers.items(): + monitor = server._monitor + self.assertFalse(monitor._stream) + self.assertIsNone(monitor._rtt_monitor._executor._thread) + + +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_dns.py b/test/test_dns.py index 58c9005d3e..0fe57a4fe7 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -13,6 +13,7 @@ # limitations under the License. 
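serverMonitoringMode, exercised above, is an ordinary client/URI option. A sketch of both spellings (host and option values for illustration):

from pymongo import MongoClient

# "stream" keeps the extra RTT monitoring connection, "poll" disables it,
# and "auto" (the default) falls back to polling in FaaS environments.
client_poll = MongoClient("mongodb://localhost/?serverMonitoringMode=poll")
client_stream = MongoClient(serverMonitoringMode="stream")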
"""Run the SRV support tests.""" +from __future__ import annotations import glob import json @@ -21,70 +22,118 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import wait_until + from pymongo.common import validate_read_preference_tags -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.errors import ConfigurationError from pymongo.mongo_client import MongoClient +from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.uri_parser import parse_uri, split_hosts -from test import client_context, unittest -from test.utils import wait_until -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'srv_seedlist') +class TestDNSRepl(unittest.TestCase): + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" + ) + load_balanced = False -class TestDNS(unittest.TestCase): - pass + @client_context.require_replica_set + def setUp(self): + pass -def create_test(test_case): +class TestDNSLoadBalanced(unittest.TestCase): + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" + ) + load_balanced = True - @client_context.require_replica_set - @client_context.require_ssl + @client_context.require_load_balancer + def setUp(self): + pass + + +class TestDNSSharded(unittest.TestCase): + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") + load_balanced = False + + @client_context.require_mongos + def setUp(self): + pass + + +def create_test(test_case): def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") - uri = test_case['uri'] - seeds = test_case['seeds'] - hosts = test_case['hosts'] - options = test_case.get('options') - if seeds: - seeds = split_hosts(','.join(seeds)) - if hosts: - hosts = frozenset(split_hosts(','.join(hosts))) - if options: - for key, value in options.items(): - # Convert numbers / booleans to strings for comparison - if isinstance(value, bool): - options[key] = 'true' if value else 'false' - elif isinstance(value, (int, float)): - options[key] = str(value) + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") + num_hosts = test_case.get("numHosts", len(hosts or [])) + + options = test_case.get("options", {}) + if "ssl" in options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. 
+ needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) + if needs_tls and not client_context.tls: + self.skipTest("this test requires a TLS cluster") + if not needs_tls and client_context.tls: + self.skipTest("this test requires a non-TLS cluster") if seeds: - result = parse_uri(uri, validate=False) - self.assertEqual(sorted(result['nodelist']), sorted(seeds)) + seeds = split_hosts(",".join(seeds)) + if hosts: + hosts = frozenset(split_hosts(",".join(hosts))) + + if seeds or num_seeds: + result = parse_uri(uri, validate=True) + if seeds is not None: + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result["nodelist"]), num_seeds) if options: - opts = result['options'] - if 'readpreferencetags' in opts: + opts = result["options"] + if "readpreferencetags" in opts: rpts = validate_read_preference_tags( - 'readPreferenceTags', opts.pop('readpreferencetags')) - opts['readPreferenceTags'] = rpts - self.assertEqual(result['options'], options) + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. - if hostname == 'localhost': + if hostname == "localhost": copts = client_context.default_client_options.copy() - if client_context.ssl is True: - # Our test certs don't support the SRV hosts used in these tests. - copts['ssl_match_hostname'] = False + # Remove tls since SRV parsing should add it automatically. + copts.pop("tls", None) + if client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts["tlsAllowInvalidHostnames"] = True client = MongoClient(uri, **copts) - # Force server selection - client.admin.command('ismaster') - wait_until( - lambda: hosts == client.nodes, - 'match test hosts to client nodes') + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), num_seeds) + if hosts is not None: + wait_until(lambda: hosts == client.nodes, "match test hosts to client nodes") + if num_hosts is not None: + wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. 
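+ # (The SRV poller re-resolves the DNS record on a timer in the background; without a hook to wait for at least one rescan, asserting on the post-rescan topology here would be racy.)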
else: try: parse_uri(uri) @@ -96,29 +145,55 @@ def run_test(self): return run_test -def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as dns_test_file: test_method = create_test(json.load(dns_test_file)) - setattr(TestDNS, 'test_' + test_suffix, test_method) + setattr(cls, "test_" + test_suffix, test_method) -create_tests() +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) -class TestParsingErrors(unittest.TestCase): +class TestParsingErrors(unittest.TestCase): @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") def test_invalid_host(self): self.assertRaisesRegex( ConfigurationError, - "Invalid URI host: mongodb", - MongoClient, "mongodb+srv://mongodb") + "Invalid URI host: mongodb is not", + MongoClient, + "mongodb+srv://mongodb", + ) self.assertRaisesRegex( ConfigurationError, - "Invalid URI host: mongodb.com", - MongoClient, "mongodb+srv://mongodb.com") + "Invalid URI host: mongodb.com is not", + MongoClient, + "mongodb+srv://mongodb.com", + ) + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + MongoClient, + "mongodb+srv://127.0.0.1", + ) + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + MongoClient, + "mongodb+srv://[::1]", + ) + + +class TestCaseInsensitive(IntegrationTest): + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") + def test_connect_case_insensitive(self): + client = MongoClient("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + self.addCleanup(client.close) + self.assertGreater(len(client.topology_description.server_descriptions()), 1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_encryption.py b/test/test_encryption.py index 91018d74bf..2ffb6d4935 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -13,99 +13,174 @@ # limitations under the License. 
"""Test client side encryption spec.""" +from __future__ import annotations import base64 import copy import os -import traceback +import re import socket +import socketserver +import ssl import sys +import textwrap +import traceback import uuid +from threading import Thread +from typing import Any, Dict, Mapping + +from pymongo.collection import Collection sys.path[0:0] = [""] -from bson import encode, json_util -from bson.binary import (Binary, - JAVA_LEGACY, - STANDARD, - UUID_SUBTYPE) +from test import ( + AWS_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, + IntegrationTest, + PyMongoTestCase, + client_context, + unittest, +) +from test.test_bulk import BulkTestBase +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + OvertCommandListener, + SpecTestCreator, + TopologyEventListener, + camel_to_snake_args, + is_greenthread_patched, + rs_or_single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner + +from bson import DatetimeMS, Decimal128, encode, json_util +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON - +from pymongo import ReadPreference, encryption from pymongo.cursor import CursorType -from pymongo.encryption import (Algorithm, - ClientEncryption) -from pymongo.encryption_options import AutoEncryptionOpts, _HAVE_PYMONGOCRYPT -from pymongo.errors import (BulkWriteError, - ConfigurationError, - EncryptionError, - InvalidOperation, - OperationFailure, - WriteError) +from pymongo.encryption import Algorithm, ClientEncryption, QueryType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, + WriteError, +) from pymongo.mongo_client import MongoClient -from pymongo.operations import InsertOne +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern -from test import unittest, IntegrationTest, PyMongoTestCase, client_context -from test.utils import (TestCreator, - camel_to_snake_args, - OvertCommandListener, - rs_or_single_client, - wait_until) -from test.utils_spec_runner import SpecRunner +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} def get_client_opts(client): return client._MongoClient__options -KMS_PROVIDERS = {'local': {'key': b'\x00'*96}} - - class TestAutoEncryptionOpts(PyMongoTestCase): - @unittest.skipIf(_HAVE_PYMONGOCRYPT, 'pymongocrypt is installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically + client = MongoClient( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True + ), + connect=False, + ) + self.addCleanup(client.close) + + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): with self.assertRaises(ConfigurationError): - AutoEncryptionOpts({}, 'admin.datakeys') + AutoEncryptionOpts({}, "keyvault.datakeys") - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 
'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init(self): - opts = AutoEncryptionOpts({}, 'admin.datakeys') + opts = AutoEncryptionOpts({}, "keyvault.datakeys") self.assertEqual(opts._kms_providers, {}) - self.assertEqual(opts._key_vault_namespace, 'admin.datakeys') + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") self.assertEqual(opts._key_vault_client, None) self.assertEqual(opts._schema_map, None) self.assertEqual(opts._bypass_auto_encryption, False) - self.assertEqual(opts._mongocryptd_uri, 'mongodb://localhost:27020') + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") self.assertEqual(opts._mongocryptd_bypass_spawn, False) - self.assertEqual(opts._mongocryptd_spawn_path, 'mongocryptd') - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + self.assertEqual(opts._kms_ssl_contexts, {}) - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_spawn_args(self): # User can override idleShutdownTimeoutSecs opts = AutoEncryptionOpts( - {}, 'admin.datakeys', - mongocryptd_spawn_args=['--idleShutdownTimeoutSecs=88']) - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=88']) + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) # idleShutdownTimeoutSecs is added by default - opts = AutoEncryptionOpts( - {}, 'admin.datakeys', mongocryptd_spawn_args=[]) - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) # Also added when other options are given opts = AutoEncryptionOpts( - {}, 'admin.datakeys', - mongocryptd_spawn_args=['--quiet', '--port=27020']) + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) self.assertEqual( opts._mongocryptd_spawn_args, - ['--quiet', '--port=27020', '--idleShutdownTimeoutSecs=60']) + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_kms_tls_options(self): + # Error cases: + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + tls_opts: Any + for tls_opts in [ + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + ]: + with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + with self.assertRaises(FileNotFoundError): + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) + # Success cases: + tls_opts: Any + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + self.assertEqual(opts._kms_ssl_contexts, {}) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + ctx = 
opts._kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = opts._kms_ssl_contexts["aws"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, + "k.d", + kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + ) + ctx = opts._kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) class TestClientOptions(PyMongoTestCase): @@ -118,9 +193,9 @@ def test_default(self): self.addCleanup(client.close) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_kwargs(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = MongoClient(auto_encryption_opts=opts, connect=False) self.addCleanup(client.close) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) @@ -130,10 +205,10 @@ class EncryptionIntegrationTest(IntegrationTest): """Base class for encryption integration tests.""" @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(EncryptionIntegrationTest, cls).setUpClass() + super().setUpClass() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -145,16 +220,14 @@ def assertBinaryUUID(self, val): # Location of JSON test files. -BASE = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'client-side-encryption') -SPEC_PATH = os.path.join(BASE, 'spec') +BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") +SPEC_PATH = os.path.join(BASE, "spec") -OPTS = CodecOptions(uuid_representation=STANDARD) +OPTS = CodecOptions() # Use SON to preserve the order of fields while parsing json. Use tz_aware # =False to match how CodecOptions decodes dates. -JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, - tz_aware=False) +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) def read(*paths): @@ -171,38 +244,39 @@ def bson_data(*paths): class TestClientSimple(EncryptionIntegrationTest): - def _test_auto_encrypt(self, opts): client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) # Create the encrypted field's data key. key_vault = create_key_vault( - self.client.admin.datakeys, - json_data('custom', 'key-document-local.json')) + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) self.addCleanup(key_vault.drop) # Collection.insert_one/insert_many auto encrypts. 
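+ # The docs below are written as plaintext; with auto encryption enabled the driver encrypts the 'ssn' field client-side before each insert command is sent to the server.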
- docs = [{'_id': 0, 'ssn': '000'}, - {'_id': 1, 'ssn': '111'}, - {'_id': 2, 'ssn': '222'}, - {'_id': 3, 'ssn': '333'}, - {'_id': 4, 'ssn': '444'}, - {'_id': 5, 'ssn': '555'}] + docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] encrypted_coll = client.pymongo_test.test encrypted_coll.insert_one(docs[0]) encrypted_coll.insert_many(docs[1:3]) unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) unack.insert_one(docs[3]) unack.insert_many(docs[4:], ordered=False) - wait_until(lambda: self.db.test.count_documents({}) == len(docs), - 'insert documents with w=0') + wait_until( + lambda: self.db.test.count_documents({}) == len(docs), "insert documents with w=0" + ) # Database.command auto decrypts. - res = client.pymongo_test.command( - 'find', 'test', filter={'ssn': '000'}) - decrypted_docs = res['cursor']['firstBatch'] - self.assertEqual(decrypted_docs, [{'_id': 0, 'ssn': '000'}]) + res = client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) # Collection.find auto decrypts. decrypted_docs = list(encrypted_coll.find()) @@ -221,149 +295,200 @@ def _test_auto_encrypt(self, opts): self.assertEqual(decrypted_docs, docs) # Collection.distinct auto decrypts. - decrypted_ssns = encrypted_coll.distinct('ssn') - self.assertEqual(set(decrypted_ssns), set(d['ssn'] for d in docs)) + decrypted_ssns = encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) # Make sure the field is actually encrypted. for encrypted_doc in self.db.test.find(): - self.assertIsInstance(encrypted_doc['_id'], int) - self.assertEncrypted(encrypted_doc['ssn']) + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) # Attempt to encrypt an unencodable object. with self.assertRaises(BSONError): - encrypted_coll.insert_one({'unencodeable': object()}) + encrypted_coll.insert_one({"unencodeable": object()}) def test_auto_encrypt(self): # Configure the encrypted field via jsonSchema. - json_schema = json_data('custom', 'schema.json') + json_schema = json_data("custom", "schema.json") create_with_schema(self.db.test, json_schema) self.addCleanup(self.db.test.drop) - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") self._test_auto_encrypt(opts) def test_auto_encrypt_local_schema_map(self): # Configure the encrypted field via the local schema_map option. 
- schemas = {'pymongo_test.test': json_data('custom', 'schema.json')} - opts = AutoEncryptionOpts( - KMS_PROVIDERS, 'admin.datakeys', schema_map=schemas) + schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) self._test_auto_encrypt(opts) def test_use_after_close(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - client.admin.command('isMaster') + client.admin.command("ping") client.close() - with self.assertRaisesRegex(InvalidOperation, - 'Cannot use MongoClient after close'): - client.admin.command('isMaster') + with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): + client.admin.command("ping") + + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", + ) + def test_fork(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client.close) + def target(): + client.admin.command("ping") -class TestClientMaxWireVersion(IntegrationTest): + with self.fork(target): + target() + + +class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): + def test_upsert_uuid_standard_encrypt(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client.close) + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + +class TestClientMaxWireVersion(IntegrationTest): @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestClientMaxWireVersion, cls).setUpClass() + super().setUpClass() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'Auto-encryption requires a minimum MongoDB version of 4.2' + msg = "Auto-encryption requires a minimum MongoDB version of 4.2" with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.insert_one({}) with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command('isMaster') + client.admin.command("ping") with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.find_one({}) with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.bulk_write([InsertOne({})]) def test_raise_unsupported_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'find_raw_batches does not support auto encryption' + msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.find_raw_batches({}) - msg = 'aggregate_raw_batches does not support auto encryption' + msg = "aggregate_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.aggregate_raw_batches([]) if client_context.is_mongos: - msg = 'Exhaust cursors are not supported by mongos' + msg = "Exhaust cursors are not supported by mongos" else: - msg = 'exhaust cursors do not support auto encryption' + msg = "exhaust cursors do not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): next(client.test.test.find(cursor_type=CursorType.EXHAUST)) class TestExplicitSimple(EncryptionIntegrationTest): - def test_encrypt_decrypt(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) # Use standard UUID representation. - key_vault = client_context.client.admin.get_collection( - 'datakeys', codec_options=OPTS) + key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) self.addCleanup(key_vault.drop) # Create the encrypted field's data key. - key_id = client_encryption.create_data_key( - 'local', key_alt_names=['name']) + key_id = client_encryption.create_data_key("local", key_alt_names=["name"]) self.assertBinaryUUID(key_id) - self.assertTrue(key_vault.find_one({'_id': key_id})) + self.assertTrue(key_vault.find_one({"_id": key_id})) # Create an unused data key to make sure filtering works. 
- unused_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['unused']) + unused_key_id = client_encryption.create_data_key("local", key_alt_names=["unused"]) self.assertBinaryUUID(unused_key_id) - self.assertTrue(key_vault.find_one({'_id': unused_key_id})) + self.assertTrue(key_vault.find_one({"_id": unused_key_id})) - doc = {'_id': 0, 'ssn': '000'} + doc = {"_id": 0, "ssn": "000"} encrypted_ssn = client_encryption.encrypt( - doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) # Ensure encryption via key_alt_name for the same key produces the # same output. encrypted_ssn2 = client_encryption.encrypt( - doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='name') + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) self.assertEqual(encrypted_ssn, encrypted_ssn2) # Test decryption. decrypted_ssn = client_encryption.decrypt(encrypted_ssn) - self.assertEqual(decrypted_ssn, doc['ssn']) + self.assertEqual(decrypted_ssn, doc["ssn"]) def test_validation(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) - msg = 'value to decrypt must be a bson.binary.Binary with subtype 6' + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt('str') + client_encryption.decrypt("str") # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt(Binary(b'123')) + client_encryption.decrypt(Binary(b"123")) - msg = 'key_id must be a bson.binary.Binary with subtype 4' + msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + uid = uuid.uuid4() with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) + client_encryption.encrypt("str", algo, key_id=uid) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=Binary(b'123')) + client_encryption.encrypt("str", algo, key_id=Binary(b"123")) def test_bson_errors(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. 
@@ -372,37 +497,41 @@ def test_bson_errors(self): client_encryption.encrypt( unencodable_value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE)) + key_id=Binary.from_uuid(uuid.uuid4()), + ) def test_codec_options(self): - with self.assertRaisesRegex(TypeError, 'codec_options must be'): + with self.assertRaisesRegex(TypeError, "codec_options must be"): ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, None) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] + ) - opts = CodecOptions(uuid_representation=JAVA_LEGACY) + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) client_encryption_legacy = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, opts) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts + ) self.addCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. - key_id = client_encryption_legacy.create_data_key('local') + key_id = client_encryption_legacy.create_data_key("local") # Encrypt a UUID with JAVA_LEGACY codec options. value = uuid.uuid4() encrypted_legacy = client_encryption_legacy.encrypt( - value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) - decrypted_value_legacy = client_encryption_legacy.decrypt( - encrypted_legacy) + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = client_encryption_legacy.decrypt(encrypted_legacy) self.assertEqual(decrypted_value_legacy, value) # Encrypt the same UUID with STANDARD codec options. + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts + ) self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( - value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) decrypted_standard = client_encryption.decrypt(encrypted_standard) self.assertEqual(decrypted_standard, value) @@ -410,124 +539,151 @@ def test_codec_options(self): self.assertNotEqual(encrypted_standard, encrypted_legacy) # Test that codec_options is applied during decryption. self.assertEqual( - client_encryption_legacy.decrypt(encrypted_standard), value) - self.assertNotEqual( - client_encryption.decrypt(encrypted_legacy), value) + client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value) + ) + self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value) def test_close(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) client_encryption.close() # Close can be called multiple times. 
client_encryption.close() algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - msg = 'Cannot use closed ClientEncryption' + msg = "Cannot use closed ClientEncryption" with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.create_data_key('local') + client_encryption.create_data_key("local") with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.encrypt('val', algo, key_alt_name='name') + client_encryption.encrypt("val", algo, key_alt_name="name") with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.decrypt(Binary(b'', 6)) + client_encryption.decrypt(Binary(b"", 6)) def test_with_statement(self): with ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', - client_context.client, OPTS) as client_encryption: + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) as client_encryption: pass - with self.assertRaisesRegex( - InvalidOperation, 'Cannot use closed ClientEncryption'): - client_encryption.create_data_key('local') + with self.assertRaisesRegex(InvalidOperation, "Cannot use closed ClientEncryption"): + client_encryption.create_data_key("local") # Spec tests +AWS_TEMP_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), +} -AWS_CREDS = { - 'accessKeyId': os.environ.get('FLE_AWS_KEY', ''), - 'secretAccessKey': os.environ.get('FLE_AWS_SECRET', '') +AWS_TEMP_NO_SESSION_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } +KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} class TestSpec(SpecRunner): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') - @client_context.require_version_min(3, 6) # SpecRunner requires sessions. 
+ @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestSpec, cls).setUpClass() + super().setUpClass() def parse_auto_encrypt_opts(self, opts): """Parse clientOptions.autoEncryptOpts.""" opts = camel_to_snake_args(opts) - kms_providers = opts['kms_providers'] - if 'aws' in kms_providers: - kms_providers['aws'] = AWS_CREDS + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS if not any(AWS_CREDS.values()): - self.skipTest('AWS environment credentials are not set') - if 'key_vault_namespace' not in opts: - opts['key_vault_namespace'] = 'admin.datakeys' + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS + if not any(GCP_CREDS.values()): + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP_CREDS + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) + opts = dict(opts) return AutoEncryptionOpts(**opts) def parse_client_options(self, opts): """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop('autoEncryptOpts') + encrypt_opts = opts.pop("autoEncryptOpts", None) if encrypt_opts: - opts['auto_encryption_opts'] = self.parse_auto_encrypt_opts( - encrypt_opts) + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - return super(TestSpec, self).parse_client_options(opts) + return super().parse_client_options(opts) def get_object_name(self, op): """Default object is collection.""" - return op.get('object', 'collection') + return op.get("object", "collection") def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) - desc = test['description'].lower() - if 'type=symbol' in desc: - self.skipTest('PyMongo does not support the symbol type') - if desc == 'explain a find with deterministic encryption': - # PyPy and Python 3.6+ have ordered dict. 
- if sys.version_info[:2] < (3, 6) and 'PyPy' not in sys.version: - self.skipTest( - 'explain test does not work without ordered dict') + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") def setup_scenario(self, scenario_def): """Override a test's setup.""" - key_vault_data = scenario_def['key_vault_data'] + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) if key_vault_data: - coll = client_context.client.get_database( - 'admin', - write_concern=WriteConcern(w='majority'), - codec_options=OPTS)['datakeys'] - coll.drop() coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority'), - codec_options=OPTS) - coll = db[coll_name] - coll.drop() - json_schema = scenario_def['json_schema'] + db = client_context.client.get_database(db_name, codec_options=OPTS) + coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} if json_schema: - db.create_collection( - coll_name, - validator={'$jsonSchema': json_schema}, codec_options=OPTS) - else: - db.create_collection(coll_name) - - if scenario_def['data']: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: # Load data. - coll.insert_many(scenario_def['data']) + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestSpec, self).allowable_errors(op) + errors = super().allowable_errors(op) # An updateOne test expects encryption to error when no $ operator # appears but pymongo raises a client side ValueError in this case. 
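+ # For example, update_one({"_id": 1}, {"ssn": "000"}) (no $-operator) fails locally with ValueError before any command reaches the server, so the spec's expected server-side error never fires.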
- if op['name'] == 'updateOne': + if op["name"] == "updateOne": errors += (ValueError,) return errors @@ -540,189 +696,222 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, SPEC_PATH) +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, + ) + ) + # Prose Tests -LOCAL_MASTER_KEY = base64.b64decode( - b'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ' - b'5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk') +ALL_KMS_PROVIDERS = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + "local": {"key": LOCAL_MASTER_KEY}, +} -LOCAL_KEY_ID = Binary( - base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) -AWS_KEY_ID = Binary( - base64.b64decode(b'AWSAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) def create_with_schema(coll, json_schema): """Create and return a Collection with a jsonSchema.""" - coll.with_options(write_concern=WriteConcern(w='majority')).drop() + coll.with_options(write_concern=WriteConcern(w="majority")).drop() return coll.database.create_collection( - coll.name, validator={'$jsonSchema': json_schema}, codec_options=OPTS) + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) def create_key_vault(vault, *data_keys): """Create the key vault collection with optional data keys.""" - vault = vault.with_options( - write_concern=WriteConcern(w='majority'), - codec_options=OPTS) + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) vault.drop() if data_keys: vault.insert_many(data_keys) + vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) return vault class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): + client_encrypted: MongoClient + client_encryption: ClientEncryption + listener: OvertCommandListener + vault: Any - @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), - 'AWS environment credentials are not set') - def setUpClass(cls): - super(TestDataKeyDoubleEncryption, cls).setUpClass() + KMS_PROVIDERS = ALL_KMS_PROVIDERS - @staticmethod - def kms_providers(): - return {'aws': AWS_CREDS, 'local': {'key': LOCAL_MASTER_KEY}} + MASTER_KEYS = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, + } - def test_data_key(self): - listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) - client.db.coll.drop() - vault = create_key_vault(client.admin.datakeys) - self.addCleanup(vault.drop) + @classmethod + @unittest.skipUnless( + 
any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + def setUpClass(cls): + super().setUpClass() + cls.listener = OvertCommandListener() + cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client.db.coll.drop() + cls.vault = create_key_vault(cls.client.keyvault.datakeys) # Configure the encrypted field via the local schema_map option. schemas = { - "db.coll": { - "bsonType": "object", - "properties": { - "encrypted_placeholder": { - "encrypt": { - "keyId": "/placeholder", - "bsonType": "string", - "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - } - } + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + } + } + }, } - } } opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas) - client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') - self.addCleanup(client_encrypted.close) - - client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', client, OPTS) - self.addCleanup(client_encryption.close) + cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + ) + cls.client_encrypted = rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + cls.client_encryption = ClientEncryption( + cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + ) - # Local create data key. - listener.reset() - local_datakey_id = client_encryption.create_data_key( - 'local', key_alt_names=['local_altname']) - self.assertBinaryUUID(local_datakey_id) - cmd = listener.results['started'][-1] - self.assertEqual('insert', cmd.command_name) - self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) - docs = list(vault.find({'_id': local_datakey_id})) - self.assertEqual(len(docs), 1) - self.assertEqual(docs[0]['masterKey']['provider'], 'local') + @classmethod + def tearDownClass(cls): + cls.vault.drop() + cls.client.close() + cls.client_encrypted.close() + cls.client_encryption.close() - # Local encrypt by key_id. - local_encrypted = client_encryption.encrypt( - 'hello local', - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=local_datakey_id) - self.assertEncrypted(local_encrypted) - client_encrypted.db.coll.insert_one( - {'_id': 'local', 'value': local_encrypted}) - doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'local'}) - self.assertEqual(doc_decrypted['value'], 'hello local') - - # Local encrypt by key_alt_name. - local_encrypted_altname = client_encryption.encrypt( - 'hello local', - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='local_altname') - self.assertEqual(local_encrypted_altname, local_encrypted) + def setUp(self): + self.listener.reset() - # AWS create data key. 
- listener.reset() - master_key = { - 'region': 'us-east-1', - 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-' - '9f25-e30687b580d0' - } - aws_datakey_id = client_encryption.create_data_key( - 'aws', master_key=master_key, key_alt_names=['aws_altname']) - self.assertBinaryUUID(aws_datakey_id) - cmd = listener.results['started'][-1] - self.assertEqual('insert', cmd.command_name) - self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) - docs = list(vault.find({'_id': aws_datakey_id})) + def run_test(self, provider_name): + # Create data key. + master_key: Any = self.MASTER_KEYS[provider_name] + datakey_id = self.client_encryption.create_data_key( + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] + ) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.started_events[-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = list(self.vault.find({"_id": datakey_id})) self.assertEqual(len(docs), 1) - self.assertEqual(docs[0]['masterKey']['provider'], 'aws') + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) - # AWS encrypt by key_id. - aws_encrypted = client_encryption.encrypt( - 'hello aws', + # Encrypt by key_id. + encrypted = self.client_encryption.encrypt( + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=aws_datakey_id) - self.assertEncrypted(aws_encrypted) - client_encrypted.db.coll.insert_one( - {'_id': 'aws', 'value': aws_encrypted}) - doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'aws'}) - self.assertEqual(doc_decrypted['value'], 'hello aws') - - # AWS encrypt by key_alt_name. - aws_encrypted_altname = client_encryption.encrypt( - 'hello aws', + key_id=datakey_id, + ) + self.assertEncrypted(encrypted) + self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore + + # Encrypt by key_alt_name. + encrypted_altname = self.client_encryption.encrypt( + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='aws_altname') - self.assertEqual(aws_encrypted_altname, aws_encrypted) + key_alt_name=f"{provider_name}_altname", + ) + self.assertEqual(encrypted_altname, encrypted) # Explicitly encrypting an auto encrypted field. 
- msg = (r'Cannot encrypt element of type binData because schema ' - r'requires that type is one of: \[ string \]') - with self.assertRaisesRegex(EncryptionError, msg): - client_encrypted.db.coll.insert_one( - {'encrypted_placeholder': local_encrypted}) + with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): + self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) + def test_data_key_local(self): + self.run_test("local") -class TestExternalKeyVault(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_data_key_aws(self): + self.run_test("aws") + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def test_data_key_azure(self): + self.run_test("azure") + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def test_data_key_gcp(self): + self.run_test("gcp") + def test_data_key_kmip(self): + self.run_test("kmip") + + +class TestExternalKeyVault(EncryptionIntegrationTest): @staticmethod def kms_providers(): - return {'local': {'key': LOCAL_MASTER_KEY}} + return {"local": {"key": LOCAL_MASTER_KEY}} def _test_external_key_vault(self, with_external_key_vault): self.client.db.coll.drop() vault = create_key_vault( - self.client.admin.datakeys, - json_data('corpus', 'corpus-key-local.json'), - json_data('corpus', 'corpus-key-aws.json')) + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) self.addCleanup(vault.drop) # Configure the encrypted field via the local schema_map option. - schemas = {'db.coll': json_data('external', 'external-schema.json')} + schemas = {"db.coll": json_data("external", "external-schema.json")} if with_external_key_vault: - key_vault_client = rs_or_single_client( - username='fake-user', password='fake-pwd') + key_vault_client = rs_or_single_client(username="fake-user", password="fake-pwd") self.addCleanup(key_vault_client.close) else: key_vault_client = client_context.client opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas, - key_vault_client=key_vault_client) + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', key_vault_client, OPTS) + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) self.addCleanup(client_encryption.close) if with_external_key_vault: @@ -741,14 +930,15 @@ def _test_external_key_vault(self, with_external_key_vault): client_encryption.encrypt( "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=LOCAL_KEY_ID) + key_id=LOCAL_KEY_ID, + ) # AuthenticationFailed error. 
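+ # (Server error code 18 is AuthenticationFailed; the external key vault client above was deliberately configured with fake credentials.)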
self.assertIsInstance(ctx.exception.cause, OperationFailure) self.assertEqual(ctx.exception.cause.code, 18) else: client_encryption.encrypt( - "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=LOCAL_KEY_ID) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) def test_external_key_vault_1(self): self._test_external_key_vault(True) @@ -758,163 +948,188 @@ def test_external_key_vault_2(self): class TestViews(EncryptionIntegrationTest): - @staticmethod def kms_providers(): - return {'local': {'key': LOCAL_MASTER_KEY}} + return {"local": {"key": LOCAL_MASTER_KEY}} def test_views_are_prohibited(self): self.client.db.view.drop() - self.client.db.create_collection('view', viewOn='coll') + self.client.db.create_collection("view", viewOn="coll") self.addCleanup(self.client.db.view.drop) - opts = AutoEncryptionOpts(self.kms_providers(), 'admin.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) self.addCleanup(client_encrypted.close) - with self.assertRaisesRegex( - EncryptionError, 'cannot auto encrypt a view'): + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): client_encrypted.db.view.insert_one({}) class TestCorpus(EncryptionIntegrationTest): - @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUpClass(cls): - super(TestCorpus, cls).setUpClass() + super().setUpClass() @staticmethod def kms_providers(): - return {'aws': AWS_CREDS, 'local': {'key': LOCAL_MASTER_KEY}} + return ALL_KMS_PROVIDERS @staticmethod def fix_up_schema(json_schema): """Remove deprecated symbol/dbPointer types from json schema.""" - for key in json_schema['properties'].keys(): - if '_symbol_' in key or '_dbPointer_' in key: - del json_schema['properties'][key] + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] return json_schema @staticmethod def fix_up_curpus(corpus): """Disallow deprecated symbol/dbPointer types from corpus test.""" for key in corpus: - if '_symbol_' in key or '_dbPointer_' in key: - corpus[key]['allowed'] = False + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False return corpus @staticmethod def fix_up_curpus_encrypted(corpus_encrypted, corpus): """Fix the expected values for deprecated symbol/dbPointer types.""" for key in corpus_encrypted: - if '_symbol_' in key or '_dbPointer_' in key: + if "_symbol_" in key or "_dbPointer_" in key: corpus_encrypted[key] = copy.deepcopy(corpus[key]) return corpus_encrypted def _test_corpus(self, opts): # Drop and create the collection 'db.coll' with jsonSchema. 
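+ # (create_with_schema drops any existing 'db.coll' first, so each corpus run starts from a clean collection with the $jsonSchema validator attached.)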
coll = create_with_schema( - self.client.db.coll, - self.fix_up_schema(json_data('corpus', 'corpus-schema.json'))) + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) self.addCleanup(coll.drop) vault = create_key_vault( - self.client.admin.datakeys, - json_data('corpus', 'corpus-key-local.json'), - json_data('corpus', 'corpus-key-aws.json')) + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) self.addCleanup(vault.drop) - client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', client_context.client, - OPTS) + self.kms_providers(), + "keyvault.datakeys", + client_context.client, + OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) self.addCleanup(client_encryption.close) - corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) - corpus_copied = SON() + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) + corpus_copied: SON = SON() for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) - if key in ('_id', 'altname_aws', 'altname_local'): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): continue - if value['method'] == 'auto': + if value["method"] == "auto": continue - if value['method'] == 'explicit': - identifier = value['identifier'] - self.assertIn(identifier, ('id', 'altname')) - kms = value['kms'] - self.assertIn(kms, ('local', 'aws')) - if identifier == 'id': - if kms == 'local': - kwargs = dict(key_id=LOCAL_KEY_ID) + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": + kwargs = {"key_id": LOCAL_KEY_ID} + elif kms == "aws": + kwargs = {"key_id": AWS_KEY_ID} + elif kms == "azure": + kwargs = {"key_id": AZURE_KEY_ID} + elif kms == "gcp": + kwargs = {"key_id": GCP_KEY_ID} else: - kwargs = dict(key_id=AWS_KEY_ID) + kwargs = {"key_id": KMIP_KEY_ID} else: - kwargs = dict(key_alt_name=kms) + kwargs = {"key_alt_name": kms} - self.assertIn(value['algo'], ('det', 'rand')) - if value['algo'] == 'det': - algo = (Algorithm. 
- AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic else: algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random try: encrypted_val = client_encryption.encrypt( - value['value'], algo, **kwargs) - if not value['allowed']: - self.fail('encrypt should have failed: %r: %r' % ( - key, value)) - corpus_copied[key]['value'] = encrypted_val + value["value"], algo, **kwargs # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail(f"encrypt should have failed: {key!r}: {value!r}") + corpus_copied[key]["value"] = encrypted_val except Exception: - if value['allowed']: + if value["allowed"]: tb = traceback.format_exc() - self.fail('encrypt failed: %r: %r, traceback: %s' % ( - key, value, tb)) + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") client_encrypted.db.coll.insert_one(corpus_copied) corpus_decrypted = client_encrypted.db.coll.find_one() self.assertEqual(corpus_decrypted, corpus) - corpus_encrypted_expected = self.fix_up_curpus_encrypted(json_data( - 'corpus', 'corpus-encrypted.json'), corpus) + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) corpus_encrypted_actual = coll.find_one() for key, value in corpus_encrypted_actual.items(): - if key in ('_id', 'altname_aws', 'altname_local'): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): continue - if value['algo'] == 'det': - self.assertEqual( - value['value'], corpus_encrypted_expected[key]['value'], - key) - elif value['algo'] == 'rand' and value['allowed']: - self.assertNotEqual( - value['value'], corpus_encrypted_expected[key]['value'], - key) - - if value['allowed']: - decrypt_actual = client_encryption.decrypt(value['value']) + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = client_encryption.decrypt(value["value"]) decrypt_expected = client_encryption.decrypt( - corpus_encrypted_expected[key]['value']) + corpus_encrypted_expected[key]["value"] + ) self.assertEqual(decrypt_actual, decrypt_expected, key) else: - self.assertEqual(value['value'], corpus[key]['value'], key) + self.assertEqual(value["value"], corpus[key]["value"], key) def test_corpus(self): - opts = AutoEncryptionOpts(self.kms_providers(), 'admin.datakeys') + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + ) self._test_corpus(opts) def test_corpus_local_schema(self): # Configure the encrypted field via the local schema_map option. 
- schemas = {'db.coll': self.fix_up_schema( - json_data('corpus', 'corpus-schema.json'))} + schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas) + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS, + ) self._test_corpus(opts) @@ -925,184 +1140,1792 @@ def test_corpus_local_schema(self): class TestBsonSizeBatches(EncryptionIntegrationTest): """Prose tests for BSON size limits and batch splitting.""" + coll: Collection + coll_encrypted: Collection + client_encrypted: MongoClient + listener: OvertCommandListener + @classmethod def setUpClass(cls): - super(TestBsonSizeBatches, cls).setUpClass() + super().setUpClass() db = client_context.client.db cls.coll = db.coll cls.coll.drop() # Configure the encrypted 'db.coll' collection via jsonSchema. - json_schema = json_data('limits', 'limits-schema.json') + json_schema = json_data("limits", "limits-schema.json") db.create_collection( - 'coll', validator={'$jsonSchema': json_schema}, codec_options=OPTS, - write_concern=WriteConcern(w='majority')) + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) # Create the key vault. coll = client_context.client.get_database( - 'admin', - write_concern=WriteConcern(w='majority'), - codec_options=OPTS)['datakeys'] + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] coll.drop() - coll.insert_one(json_data('limits', 'limits-key.json')) + coll.insert_one(json_data("limits", "limits-key.json")) - opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, 'admin.datakeys') + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") cls.listener = OvertCommandListener() cls.client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, event_listeners=[cls.listener]) + auto_encryption_opts=opts, event_listeners=[cls.listener] + ) cls.coll_encrypted = cls.client_encrypted.db.coll @classmethod def tearDownClass(cls): cls.coll_encrypted.drop() cls.client_encrypted.close() - super(TestBsonSizeBatches, cls).tearDownClass() + super().tearDownClass() def test_01_insert_succeeds_under_2MiB(self): - doc = {'_id': 'over_2mib_under_16mib', 'unencrypted': 'a' * _2_MiB} + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'over_2mib_under_16mib_bulk' + doc["_id"] = "over_2mib_under_16mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_02_insert_succeeds_over_2MiB_post_encryption(self): - doc = {'_id': 'encryption_exceeds_2mib', - 'unencrypted': 'a' * ((2**21) - 2000)} - doc.update(json_data('limits', 'limits-doc.json')) + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) self.coll_encrypted.insert_one(doc) # Same with bulk_write. 
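+ # (bulk_write goes through the driver's batch-splitting path, so it is exercised separately with the same oversized-after-encryption document.)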
- doc['_id'] = 'encryption_exceeds_2mib_bulk' + doc["_id"] = "encryption_exceeds_2mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_03_bulk_batch_split(self): - doc1 = {'_id': 'over_2mib_1', 'unencrypted': 'a' * _2_MiB} - doc2 = {'_id': 'over_2mib_2', 'unencrypted': 'a' * _2_MiB} + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_04_bulk_batch_split(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc1 = {'_id': 'encryption_exceeds_2mib_1', - 'unencrypted': 'a' * (_2_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} doc1.update(limits_doc) - doc2 = {'_id': 'encryption_exceeds_2mib_2', - 'unencrypted': 'a' * (_2_MiB - 2000)} + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_05_insert_succeeds_just_under_16MiB(self): - doc = {'_id': 'under_16mib', 'unencrypted': 'a' * (_16_MiB - 2000)} + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'under_16mib_bulk' + doc["_id"] = "under_16mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_06_insert_fails_over_16MiB(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc = {'_id': 'encryption_exceeds_16mib', - 'unencrypted': 'a' * (_16_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} doc.update(limits_doc) - with self.assertRaisesRegex(WriteError, 'object to insert too large'): + with self.assertRaisesRegex(WriteError, "object to insert too large"): self.coll_encrypted.insert_one(doc) # Same with bulk_write. 
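+        # bulk_write surfaces the same server-side size limit as a
+        # BulkWriteError wrapping a writeErrors entry with code 2.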
- doc['_id'] = 'encryption_exceeds_16mib_bulk' + doc["_id"] = "encryption_exceeds_16mib_bulk" with self.assertRaises(BulkWriteError) as ctx: self.coll_encrypted.bulk_write([InsertOne(doc)]) - err = ctx.exception.details['writeErrors'][0] - self.assertEqual(2, err['code']) - self.assertIn('object to insert too large', err['errmsg']) - + err = ctx.exception.details["writeErrors"][0] + self.assertEqual(2, err["code"]) + self.assertIn("object to insert too large", err["errmsg"]) class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) def setUpClass(cls): - super(TestCustomEndpoint, cls).setUpClass() - cls.client_encryption = ClientEncryption( - {'aws': AWS_CREDS}, 'admin.datakeys', client_context.client, OPTS) - - def _test_create_data_key(self, master_key): - data_key_id = self.client_encryption.create_data_key( - 'aws', master_key=master_key) + super().setUpClass() + + def setUp(self): + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } + self.client_encryption = ClientEncryption( + kms_providers=kms_providers, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" + self.client_encryption_invalid = ClientEncryption( + kms_providers=kms_providers_invalid, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) + self._kmip_host_error = None + self._invalid_host_error = None + + def tearDown(self): + self.client_encryption.close() + self.client_encryption_invalid.close() + + def run_test_expected_success(self, provider_name, master_key): + data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( - 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - self.assertEqual('test', self.client_encryption.decrypt(encrypted)) - - def test_02_aws_region_key(self): - self._test_create_data_key({ + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption.decrypt(encrypted)) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_01_aws_region_key(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_02_aws_region_key_endpoint(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS 
environment credentials are not set") + def test_03_aws_region_key_endpoint_port(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_04_aws_endpoint_invalid_port(self): + master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0") - }) + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com:12345", + } + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: + self.client_encryption.create_data_key("aws", master_key=master_key) + self.assertIsInstance(ctx.exception.cause, AutoReconnect) - def test_03_aws_region_key_endpoint(self): - self._test_create_data_key({ + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_05_aws_endpoint_wrong_region(self): + master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com" - }) + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com", + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for EncryptionError to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("aws", master_key=master_key) - def test_04_aws_region_key_endpoint_port(self): - self._test_create_data_key({ + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_06_aws_endpoint_invalid_host(self): + master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:443" - }) + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "doesnotexist.invalid", + } + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption.create_data_key("aws", master_key=master_key) - def test_05_endpoint_invalid_port(self): + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def test_07_azure(self): master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345" + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", } - with self.assertRaises(EncryptionError) as ctx: - self.client_encryption.create_data_key( - 'aws', master_key=master_key) - self.assertIsInstance(ctx.exception.cause, socket.error) + self.run_test_expected_success("azure", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption_invalid.create_data_key("azure", master_key=master_key) - def test_05_endpoint_wrong_region(self): + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP 
environment credentials are not set") + def test_08_gcp_valid_endpoint(self): master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-2.amazonaws.com" + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443", } + self.run_test_expected_success("gcp", master_key) + # The full error should be something like: - # "Credential should be scoped to a valid region, not 'us-east-1'" - # but we only check for "us-east-1" to avoid breaking on slight - # changes to AWS' error message. - with self.assertRaisesRegex(EncryptionError, 'us-east-1'): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) - def test_05_endpoint_invalid_host(self): + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def test_09_gcp_invalid_endpoint(self): master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "doesnotexist.invalid:443", + } + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + self.client_encryption.create_data_key("gcp", master_key=master_key) + + def dns_error(self, host, port): + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) + return self._kmip_host_error + + def test_10_kmip_invalid_endpoint(self): + key = {"keyId": "1"} + self.run_test_expected_success("kmip", key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + self.client_encryption_invalid.create_data_key("kmip", key) + + def test_11_kmip_master_key_endpoint(self): + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} + self.run_test_expected_success("kmip", key) + # Override invalid endpoint: + data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) + encrypted = self.client_encryption_invalid.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) + + def test_12_kmip_master_key_invalid_endpoint(self): + key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + self.client_encryption.create_data_key("kmip", key) + + +class AzureGCPEncryptionTestMixin: + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" + client: MongoClient + + def setUp(self): + keyvault = 
self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) + create_key_vault(keyvault, self.DEK) + + def _test_explicit(self, expectation): + client_encryption = ClientEncryption( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + client_context.client, + OPTS, + ) + self.addCleanup(client_encryption.close) + + ciphertext = client_encryption.encrypt( + "string0", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=self.DEK["_id"], + ) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(client_encryption.decrypt(ciphertext), "string0") + + def _test_automatic(self, expectation_extjson, payload): + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + keyvault_namespace, + schema_map=self.SCHEMA_MAP, + ) + + insert_listener = AllowListEventListener("insert") + client = rs_or_single_client( + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) + self.addCleanup(client.close) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) + coll.drop() + + expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) + + coll.insert_one(payload) + event = insert_listener.started_events[0] + inserted_doc = event.command["documents"][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def setUpClass(cls): + cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + cls.DEK = json_data(BASE, "custom", "azure-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUpClass() + + def test_explicit(self): + return self._test_explicit( + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) + + def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06"} + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def setUpClass(cls): + cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + cls.DEK = json_data(BASE, "custom", "gcp-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUpClass() + + def test_explicit(self): + return self._test_explicit( + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) + + def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_gcp": { + "$binary": { + "base64": 
"ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06"} + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +class TestDeadlockProse(EncryptionIntegrationTest): + def setUp(self): + self.client_test = rs_or_single_client( + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) + self.addCleanup(self.client_test.close) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = rs_or_single_client( + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) + self.addCleanup(self.client_keyvault.close) + + self.client_test.keyvault.datakeys.drop() + self.client_test.db.coll.drop() + self.client_test.keyvault.datakeys.insert_one(json_data("external", "external-key.json")) + _ = self.client_test.db.create_collection( + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) + + client_encryption = ClientEncryption( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=self.client_test, + codec_options=OPTS, + ) + self.ciphertext = client_encryption.encrypt( + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) + client_encryption.close() + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + + def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = rs_or_single_client( + readConcernLevel="majority", + w="majority", + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener], + ) + + if auto_encryption_opts._bypass_auto_encryption is True: + self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption is False: + client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + self.addCleanup(client_encrypted.close) + + def test_case_1(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_2(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + 
self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_3(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_4(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_5(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_6(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_7(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + 
*self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_8(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +class TestDecryptProse(EncryptionIntegrationTest): + def setUp(self): + self.client = client_context.client + self.client.db.drop_collection("decryption_events") + create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = self.client_encryption.create_data_key("local") + self.cipher_text = self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = rs_or_single_client( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + self.addCleanup(self.encrypted_client.close) + + def test_01_command_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: + self.assertEqual(event.failure["code"], 123) + + def test_02_network_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") + + def test_03_decrypt_error(self): + self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], 
self.malformed_cipher_text + ) + + def test_04_decrypt_success(self): + self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", + ) + def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. + self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri="mongodb://localhost:27027/", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = MongoClient("mongodb://localhost:27027/?serverSelectionTimeoutMS=500") + with self.assertRaises(ServerSelectionTimeoutError): + mongocryptd_client.admin.command("ping") + + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_via_loading_shared_library(self): + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted(client_context.client.db.coll.find_one({})["encrypted"]) + no_mongocryptd_client = MongoClient( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + 
) + self.addCleanup(no_mongocryptd_client.close) + with self.assertRaises(ServerSelectionTimeoutError): + no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + + +# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests +class TestKmsTLSProse(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def setUp(self): + super().setUp() + self.patch_system_certs(CA_PEM) + self.client_encrypted = ClientEncryption( + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) + self.addCleanup(self.client_encrypted.close) + + def test_invalid_kms_certificate_expired(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8000", + } + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encrypted.create_data_key("aws", master_key=key) + + def test_invalid_hostname_in_kms_certificate(self): + key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "example.com" + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8001", + } + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. 
(_ssl.c:1129)
+        # hostname '127.0.0.1' doesn't match 'wronghost.com'
+        with self.assertRaisesRegex(
+            EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch"
+        ):
+            self.client_encrypted.create_data_key("aws", master_key=key)
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests
+class TestKmsTLSOptions(EncryptionIntegrationTest):
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def setUp(self):
+        super().setUp()
+        # 1, create client with only tlsCAFile.
+        providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002"
+        providers["gcp"]["endpoint"] = "127.0.0.1:8002"
+        kms_tls_opts_ca_only = {
+            "aws": {"tlsCAFile": CA_PEM},
+            "azure": {"tlsCAFile": CA_PEM},
+            "gcp": {"tlsCAFile": CA_PEM},
+            "kmip": {"tlsCAFile": CA_PEM},
+        }
+        self.client_encryption_no_client_cert = ClientEncryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        self.addCleanup(self.client_encryption_no_client_cert.close)
+        # 2, same providers as above but with tlsCertificateKeyFile.
+        kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only)
+        for p in kms_tls_opts:
+            kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM
+        self.client_encryption_with_tls = ClientEncryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts
+        )
+        self.addCleanup(self.client_encryption_with_tls.close)
+        # 3, update endpoints to expired host.
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8000"
+        providers["gcp"]["endpoint"] = "127.0.0.1:8000"
+        providers["kmip"]["endpoint"] = "127.0.0.1:8000"
+        self.client_encryption_expired = ClientEncryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        self.addCleanup(self.client_encryption_expired.close)
+        # 4, update endpoints to invalid host.
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8001"
+        providers["gcp"]["endpoint"] = "127.0.0.1:8001"
+        providers["kmip"]["endpoint"] = "127.0.0.1:8001"
+        self.client_encryption_invalid_hostname = ClientEncryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        self.addCleanup(self.client_encryption_invalid_hostname.close)
+        # Errors when the client has no cert, some examples:
+        # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623)
+        self.cert_error = (
+            "certificate required|SSL handshake failed|"
+            "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE"
+        )
+        # On Python 3.10+ this error might be:
+        # EOF occurred in violation of protocol (_ssl.c:2384)
+        if sys.version_info[:2] >= (3, 10):
+            self.cert_error += "|EOF"
+        # On Windows this error might be:
+        # [WinError 10054] An existing connection was forcibly closed by the remote host
+        if sys.platform == "win32":
+            self.cert_error += "|forcibly closed"
+
+    def test_01_aws(self):
+        key = {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+            "endpoint": "127.0.0.1:8002",
+        }
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            self.client_encryption_no_client_cert.create_data_key("aws", key)
+        # "parse error" here means that the TLS handshake succeeded.
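+        # (The mock KMS host is not a real AWS KMS endpoint, so once the
+        # client certificate is accepted the failure moves past the TLS
+        # layer to response parsing.)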
+ with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_tls.create_data_key("aws", key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key["endpoint"] = "127.0.0.1:8000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("aws", key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + key["endpoint"] = "127.0.0.1:8001" + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): + self.client_encryption_invalid_hostname.create_data_key("aws", key) + + def test_02_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key("azure", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("azure", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("azure", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): + self.client_encryption_invalid_hostname.create_data_key("azure", key) + + def test_03_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key("gcp", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("gcp", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("gcp", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): + self.client_encryption_invalid_hostname.create_data_key("gcp", key) + + def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key("kmip") + self.client_encryption_with_tls.create_data_key("kmip") + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("kmip") + # Invalid cert hostname error. 
+ with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): + self.client_encryption_invalid_hostname.create_data_key("kmip") + + def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = ClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + self.addCleanup(encryption.close) + ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): + def setUp(self): + self.client = client_context.client + create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_01_create_key(self): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_02_add_key_alt_name(self): + key_id = self.client_encryption.create_data_key("local") + self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +class TestExplicitQueryableEncryption(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + self.client.drop_database(self.db) + self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(self.encrypted_client.close) + + def 
test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedIndexed"], val) + + def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = list(self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1})) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): + + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + 
def test_rewrap(self):
+        for src_provider in self.MASTER_KEYS:
+            for dst_provider in self.MASTER_KEYS:
+                with self.subTest(src_provider=src_provider, dst_provider=dst_provider):
+                    self.run_test(src_provider, dst_provider)
+
+    def run_test(self, src_provider, dst_provider):
+        # Step 1. Drop the collection ``keyvault.datakeys``.
+        self.client.keyvault.drop_collection("datakeys")
+
+        # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1``.
+        client_encryption1 = ClientEncryption(
+            key_vault_client=self.client,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=KMS_TLS_OPTS,
+            codec_options=OPTS,
+        )
+        self.addCleanup(client_encryption1.close)
+
+        # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``.
+        key_id = client_encryption1.create_data_key(
+            master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider
+        )
+
+        # Step 4. Call ``client_encryption1.encrypt`` with the value "test".
+        cipher_text = client_encryption1.encrypt(
+            "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+        )
+
+        # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2``.
+        client2 = rs_or_single_client()
+        self.addCleanup(client2.close)
+        client_encryption2 = ClientEncryption(
+            key_vault_client=client2,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=KMS_TLS_OPTS,
+            codec_options=OPTS,
+        )
+        self.addCleanup(client_encryption2.close)
+
+        # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``.
+        rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key(
+            {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider]
+        )
+
+        self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1)
+
+        # Step 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result1 = client_encryption1.decrypt(cipher_text)
+        self.assertEqual(decrypt_result1, "test")
+
+        # Step 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result2 = client_encryption2.decrypt(cipher_text)
+        self.assertEqual(decrypt_result2, "test")
+
+        # Case 2. Provider is not optional when master_key is given.
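+        # A master_key alone does not identify a KMS provider, so
+        # rewrap_many_data_key must fail client-side with ConfigurationError.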
+        with self.assertRaises(ConfigurationError):
+            rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key(
+                {}, master_key=self.MASTER_KEYS[dst_provider]
+            )
+
+
+# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials
+class TestOnDemandAWSCredentials(EncryptionIntegrationTest):
+    def setUp(self):
+        super().setUp()
+        self.master_key = {
+            "region": "us-east-1",
+            "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"),
+        }
+
+    @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set")
+    def test_01_failure(self):
+        self.client_encryption = ClientEncryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=client_context.client,
+            codec_options=OPTS,
+        )
+        with self.assertRaises(EncryptionError):
+            self.client_encryption.create_data_key("aws", self.master_key)
+
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def test_02_success(self):
+        self.client_encryption = ClientEncryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=client_context.client,
+            codec_options=OPTS,
+        )
+        self.client_encryption.create_data_key("aws", self.master_key)
+
+
+class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest):
+    # Queryable Encryption is not supported on Standalone topology.
+    @client_context.require_no_standalone
+    @client_context.require_version_min(7, 0, -1)
+    def setUp(self):
+        super().setUp()
+
+    def test_queryable_encryption(self):
+        # MongoClient to use in testing that handles auth/tls/etc,
+        # and cleanup.
+        def MongoClient(**kwargs):
+            c = rs_or_single_client(**kwargs)
+            self.addCleanup(c.close)
+            return c
+
+        # Drop data from prior test runs.
+        self.client.keyvault.datakeys.drop()
+        self.client.drop_database("docs_examples")
+
+        kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}}
+
+        # Create two data keys.
+        key_vault_client = MongoClient()
+        client_encryption = ClientEncryption(
+            kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions()
+        )
+        key1_id = client_encryption.create_data_key("local")
+        key2_id = client_encryption.create_data_key("local")
+
+        # Create an encryptedFieldsMap.
+        encrypted_fields_map = {
+            "docs_examples.encrypted": {
+                "fields": [
+                    {
+                        "path": "encrypted_indexed",
+                        "bsonType": "string",
+                        "keyId": key1_id,
+                        "queries": [
+                            {
+                                "queryType": "equality",
+                            },
+                        ],
+                    },
+                    {
+                        "path": "encrypted_unindexed",
+                        "bsonType": "string",
+                        "keyId": key2_id,
+                    },
+                ],
+            },
+        }
+
+        # Create a client configured for Queryable Encryption.
+        opts = AutoEncryptionOpts(
+            kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map
+        )
+        encrypted_client = MongoClient(auto_encryption_opts=opts)
+
+        # Create a Queryable Encryption collection "docs_examples.encrypted".
+        # Because docs_examples.encrypted is in encrypted_fields_map, it is
+        # created with Queryable Encryption support.
+        db = encrypted_client.docs_examples
+        encrypted_coll = db.create_collection("encrypted")
+
+        # Auto encrypt an insert and find.
+
+        # Encrypt an insert.
+        encrypted_coll.insert_one(
+            {
+                "_id": 1,
+                "encrypted_indexed": "indexed_value",
+                "encrypted_unindexed": "unindexed_value",
+            }
+        )
+
+        # Encrypt a find.
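+        # The equality query works because encrypted_indexed was declared
+        # with an "equality" queryType, letting the driver encrypt the
+        # query value automatically.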
+ res = encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = MongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + client_encryption.close() + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#range-explicit-encryption +class TestRangeQueryProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + self.addCleanup(self.encrypted_client.close) + + def run_expression_find(self, name, expression, expected_elems, range_opts, use_expr=False): + find_payload = self.client_encryption.encrypt_expression( + expression=expression, + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + query_type=QueryType.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + self.encrypted_client.db.explicit_encryption.find(find_payload), key=lambda x: x["_id"] + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + def encrypt_and_cast(i): + return self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + }, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + ) + + # Case 3. 
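+        # Case 3 queries the closed range [0, 6]; only the 0 and 6
+        # documents should match.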
+ self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. + if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), max=cast_func(200), sparsity=1, precision=2 + ), + ) + + def test_double_no_precision(self): + self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1), float) + + def test_double_precision(self): + self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, precision=2), + float, + ) + + def test_decimal_no_precision(self): + self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1), lambda x: Decimal128(str(x)) + ) + + def test_decimal_precision(self): + self.run_test_cases( + "DecimalPrecision", + RangeOpts(min=Decimal128("0.0"), max=Decimal128("200.0"), sparsity=1, precision=2), + lambda x: Decimal128(str(x)), + ) + + def test_datetime(self): + self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + def test_int(self): + self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1), int) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + self.addCleanup(self.client_encryption.close) + + def 
test_01_simple_create(self): + coll, _ = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + def test_03_invalid_keyid(self): + with self.assertRaisesRegex( + EncryptedCollectionError, + "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + def test_04_insert_encrypted(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + coll.insert_one({"ssn": encrypted_value}) + + def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + def test_options_forward(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + def test_mixed_null_keyids(self): + key = self.client_encryption.create_data_key(kms_provider="local") + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + def test_create_datakey_fails(self): + key = self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": 
None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + def test_create_failure(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] } - with self.assertRaisesRegex(EncryptionError, 'parse error'): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) if __name__ == "__main__": diff --git a/test/test_errors.py b/test/test_errors.py new file mode 100644 index 0000000000..2cee7c15d8 --- /dev/null +++ b/test/test_errors.py @@ -0,0 +1,106 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
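+
+"""Test the exception types in pymongo.errors: error string formatting,
+unicode message handling, and pickling support."""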
+from __future__ import annotations + +import pickle +import sys +import traceback + +sys.path[0:0] = [""] + +from test import PyMongoTestCase, unittest + +from pymongo.errors import ( + BulkWriteError, + EncryptionError, + NotPrimaryError, + OperationFailure, +) + + +class TestErrors(PyMongoTestCase): + def test_not_primary_error(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except NotPrimaryError: + self.assertIn("full error", traceback.format_exc()) + + def test_operation_failure(self): + exc = OperationFailure("operation failure test", 10, {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except OperationFailure: + self.assertIn("full error", traceback.format_exc()) + + def _test_unicode_strs(self, exc): + if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): + # PyPy used to display unicode in repr differently. + self.assertEqual( + "unicode \U0001f40d, full error: {'errmsg': 'unicode \\U0001f40d'}", str(exc) + ) + else: + self.assertEqual( + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) + ) + try: + raise exc + except Exception: + self.assertIn("full error", traceback.format_exc()) + + def test_unicode_strs_operation_failure(self): + exc = OperationFailure("unicode \U0001f40d", 10, {"errmsg": "unicode \U0001f40d"}) + self._test_unicode_strs(exc) + + def test_unicode_strs_not_primary_error(self): + exc = NotPrimaryError("unicode \U0001f40d", {"errmsg": "unicode \U0001f40d"}) + self._test_unicode_strs(exc) + + def assertPyMongoErrorEqual(self, exc1, exc2): + self.assertEqual(exc1._message, exc2._message) + self.assertEqual(exc1._error_labels, exc2._error_labels) + self.assertEqual(exc1.args, exc2.args) + self.assertEqual(str(exc1), str(exc2)) + + def assertOperationFailureEqual(self, exc1, exc2): + self.assertPyMongoErrorEqual(exc1, exc2) + self.assertEqual(exc1.code, exc2.code) + self.assertEqual(exc1.details, exc2.details) + self.assertEqual(exc1._max_wire_version, exc2._max_wire_version) + + def test_pickle_NotPrimaryError(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) + self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) + + def test_pickle_OperationFailure(self): + exc = OperationFailure("error", code=5, details={}, max_wire_version=7) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + + def test_pickle_BulkWriteError(self): + exc = BulkWriteError({}) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + self.assertIn("batch op errors occurred", str(exc)) + + def test_pickle_EncryptionError(self): + cause = OperationFailure("error", code=5, details={}, max_wire_version=7) + exc = EncryptionError(cause) + exc2 = pickle.loads(pickle.dumps(exc)) + self.assertPyMongoErrorEqual(exc, exc2) + self.assertOperationFailureEqual(cause, exc2.cause) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_examples.py b/test/test_examples.py index 4db7cf020d..e003d8459a 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""MongoDB documentation examples in Python.""" +from __future__ import annotations import datetime import sys @@ -20,41 +21,44 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import rs_client, wait_until + import pymongo from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import rs_client, rs_or_single_client - -class TestSampleShellCommands(unittest.TestCase): +class TestSampleShellCommands(IntegrationTest): @classmethod - @client_context.require_connection def setUpClass(cls): - cls.client = rs_or_single_client(w="majority") + super().setUpClass() # Run once before any tests run. - cls.client.pymongo_test.inventory.drop() + cls.db.inventory.drop() @classmethod def tearDownClass(cls): - client_context.client.drop_database("pymongo_test") + cls.client.drop_database("pymongo_test") def tearDown(self): # Run after every test. - self.client.pymongo_test.inventory.drop() + self.db.inventory.drop() def test_first_three_examples(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 1 db.inventory.insert_one( - {"item": "canvas", - "qty": 100, - "tags": ["cotton"], - "size": {"h": 28, "w": 35.5, "uom": "cm"}}) + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) # End Example 1 self.assertEqual(db.inventory.count_documents({}), 1) @@ -63,50 +67,73 @@ def test_first_three_examples(self): cursor = db.inventory.find({"item": "canvas"}) # End Example 2 - self.assertEqual(cursor.count(), 1) + self.assertEqual(len(list(cursor)), 1) # Start Example 3 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "size": {"h": 14, "w": 21, "uom": "cm"}}, - {"item": "mat", - "qty": 85, - "tags": ["gray"], - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}}, - {"item": "mousepad", - "qty": 25, - "tags": ["gel", "blue"], - "size": {"h": 19, "w": 22.85, "uom": "cm"}}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) # End Example 3 self.assertEqual(db.inventory.count_documents({}), 4) def test_query_top_level_fields(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 6 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": 
"paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 6 self.assertEqual(db.inventory.count_documents({}), 5) @@ -136,60 +163,71 @@ def test_query_top_level_fields(self): self.assertEqual(len(list(cursor)), 1) # Start Example 12 - cursor = db.inventory.find( - {"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) # End Example 12 self.assertEqual(len(list(cursor)), 3) # Start Example 13 - cursor = db.inventory.find({ - "status": "A", - "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]}) + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) # End Example 13 self.assertEqual(len(list(cursor)), 2) def test_query_embedded_documents(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 14 # Subdocument key order matters in a few of these examples so we have # to use bson.son.SON instead of a Python dict. from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), - "status": "A"}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "status": "A", + }, + ] + ) # End Example 14 # Start Example 15 - cursor = db.inventory.find( - {"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) # End Example 15 self.assertEqual(len(list(cursor)), 1) # Start Example 16 - cursor = db.inventory.find( - {"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) # End Example 16 self.assertEqual(len(list(cursor)), 0) @@ -207,37 +245,29 @@ def test_query_embedded_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 19 - cursor = db.inventory.find( - {"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) # End Example 19 self.assertEqual(len(list(cursor)), 1) def test_query_arrays(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 20 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "dim_cm": [14, 21]}, - 
{"item": "notebook", - "qty": 50, - "tags": ["red", "blank"], - "dim_cm": [14, 21]}, - {"item": "paper", - "qty": 100, - "tags": ["red", "blank", "plain"], - "dim_cm": [14, 21]}, - {"item": "planner", - "qty": 75, - "tags": ["blank", "red"], - "dim_cm": [22.85, 30]}, - {"item": "postcard", - "qty": 45, - "tags": ["blue"], - "dim_cm": [10, 15.25]}]) + db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) # End Example 20 # Start Example 21 @@ -271,8 +301,7 @@ def test_query_arrays(self): self.assertEqual(len(list(cursor)), 4) # Start Example 26 - cursor = db.inventory.find( - {"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) # End Example 26 self.assertEqual(len(list(cursor)), 1) @@ -290,70 +319,80 @@ def test_query_arrays(self): self.assertEqual(len(list(cursor)), 1) def test_query_array_of_documents(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 29 # Subdocument key order matters in a few of these examples so we have # to use bson.son.SON instead of a Python dict. from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)])]}, - {"item": "notebook", - "instock": [ - SON([("warehouse", "C"), ("qty", 5)])]}, - {"item": "paper", - "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)])]}, - {"item": "planner", - "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)])]}, - {"item": "postcard", - "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)])]}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + SON([("warehouse", "A"), ("qty", 5)]), + SON([("warehouse", "C"), ("qty", 15)]), + ], + }, + {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + { + "item": "paper", + "instock": [ + SON([("warehouse", "A"), ("qty", 60)]), + SON([("warehouse", "B"), ("qty", 15)]), + ], + }, + { + "item": "planner", + "instock": [ + SON([("warehouse", "A"), ("qty", 40)]), + SON([("warehouse", "B"), ("qty", 5)]), + ], + }, + { + "item": "postcard", + "instock": [ + SON([("warehouse", "B"), ("qty", 15)]), + SON([("warehouse", "C"), ("qty", 35)]), + ], + }, + ] + ) # End Example 29 # Start Example 30 - cursor = db.inventory.find( - {"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) # End Example 30 self.assertEqual(len(list(cursor)), 1) # Start Example 31 - cursor = db.inventory.find( - {"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) # End Example 31 self.assertEqual(len(list(cursor)), 0) # Start Example 32 - cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}}) + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) # End Example 32 self.assertEqual(len(list(cursor)), 3) # Start Example 33 - cursor = db.inventory.find({'instock.qty': {"$lte": 20}}) + 
cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) # End Example 33 self.assertEqual(len(list(cursor)), 5) # Start Example 34 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) # End Example 34 self.assertEqual(len(list(cursor)), 1) # Start Example 35 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) # End Example 35 self.assertEqual(len(list(cursor)), 3) @@ -365,14 +404,13 @@ def test_query_array_of_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 37 - cursor = db.inventory.find( - {"instock.qty": 5, "instock.warehouse": "A"}) + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) # End Example 37 self.assertEqual(len(list(cursor)), 2) def test_query_null(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 38 db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) @@ -397,32 +435,43 @@ def test_query_null(self): self.assertEqual(len(list(cursor)), 1) def test_projection(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 42 - db.inventory.insert_many([ - {"item": "journal", - "status": "A", - "size": {"h": 14, "w": 21, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 5}]}, - {"item": "notebook", - "status": "A", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": [{"warehouse": "C", "qty": 5}]}, - {"item": "paper", - "status": "D", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": [{"warehouse": "A", "qty": 60}]}, - {"item": "planner", - "status": "D", - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 40}]}, - {"item": "postcard", - "status": "A", - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "instock": [ - {"warehouse": "B", "qty": 15}, - {"warehouse": "C", "qty": 35}]}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) # End Example 42 # Start Example 43 @@ -432,8 +481,7 @@ def test_projection(self): self.assertEqual(len(list(cursor)), 3) # Start Example 44 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) # End Example 44 for doc in cursor: @@ -444,8 +492,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 45 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 for doc in cursor: @@ -456,8 +503,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 46 - cursor = db.inventory.find( - 
{"status": "A"}, {"status": 0, "instock": 0}) + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 for doc in cursor: @@ -468,8 +514,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 47 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 for doc in cursor: @@ -478,10 +523,10 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertFalse("instock" in doc) - size = doc['size'] - self.assertTrue('uom' in size) - self.assertFalse('h' in size) - self.assertFalse('w' in size) + size = doc["size"] + self.assertTrue("uom" in size) + self.assertFalse("h" in size) + self.assertFalse("w" in size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) @@ -493,14 +538,13 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertTrue("instock" in doc) - size = doc['size'] - self.assertFalse('uom' in size) - self.assertTrue('h' in size) - self.assertTrue('w' in size) + size = doc["size"] + self.assertFalse("uom" in size) + self.assertTrue("h" in size) + self.assertTrue("w" in size) # Start Example 49 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 for doc in cursor: @@ -509,14 +553,14 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertFalse("size" in doc) self.assertTrue("instock" in doc) - for subdoc in doc['instock']: - self.assertFalse('warehouse' in subdoc) - self.assertTrue('qty' in subdoc) + for subdoc in doc["instock"]: + self.assertFalse("warehouse" in subdoc) + self.assertTrue("qty" in subdoc) # Start Example 50 cursor = db.inventory.find( - {"status": "A"}, - {"item": 1, "status": 1, "instock": {"$slice": -1}}) + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) # End Example 50 for doc in cursor: @@ -528,57 +572,80 @@ def test_projection(self): self.assertEqual(len(doc["instock"]), 1) def test_update_and_replace(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 51 - db.inventory.insert_many([ - {"item": "canvas", - "qty": 100, - "size": {"h": 28, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "mat", - "qty": 85, - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "mousepad", - "qty": 25, - "size": {"h": 19, "w": 22.85, "uom": "cm"}, - "status": "P"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}, - {"item": "sketchbook", - "qty": 80, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "sketch pad", - "qty": 95, - "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": 
{"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 51 # Start Example 52 db.inventory.update_one( {"item": "paper"}, - {"$set": {"size.uom": "cm", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 52 for doc in db.inventory.find({"item": "paper"}): @@ -589,8 +656,8 @@ def test_update_and_replace(self): # Start Example 53 db.inventory.update_many( {"qty": {"$lt": 50}}, - {"$set": {"size.uom": "in", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 53 for doc in db.inventory.find({"qty": {"$lt": 50}}): @@ -601,10 +668,11 @@ def test_update_and_replace(self): # Start Example 54 db.inventory.replace_one( {"item": "paper"}, - {"item": "paper", - "instock": [ - {"warehouse": "A", "qty": 60}, - {"warehouse": "B", "qty": 40}]}) + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) # End Example 54 for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): @@ -614,30 +682,43 @@ def test_update_and_replace(self): self.assertEqual(len(doc["instock"]), 2) def test_delete(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 55 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 55 self.assertEqual(db.inventory.count_documents({}), 5) @@ -660,11 +741,9 @@ def test_delete(self): self.assertEqual(db.inventory.count_documents({}), 0) - @client_context.require_version_min(3, 5, 11) - @client_context.require_replica_set 
- @client_context.require_no_mmap + @client_context.require_change_streams def test_change_streams(self): - db = client_context.client.pymongo_test + db = self.db done = False def insert_docs(): @@ -679,120 +758,111 @@ def insert_docs(): # 1. The database for reactive, real-time applications # Start Changestream Example 1 cursor = db.inventory.watch() - document = next(cursor) + next(cursor) # End Changestream Example 1 # Start Changestream Example 2 - cursor = db.inventory.watch(full_document='updateLookup') - document = next(cursor) + cursor = db.inventory.watch(full_document="updateLookup") + next(cursor) # End Changestream Example 2 # Start Changestream Example 3 resume_token = cursor.resume_token cursor = db.inventory.watch(resume_after=resume_token) - document = next(cursor) + next(cursor) # End Changestream Example 3 # Start Changestream Example 4 pipeline = [ - {'$match': {'fullDocument.username': 'alice'}}, - {'$addFields': {'newField': 'this is an added field!'}} + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) - document = next(cursor) + next(cursor) # End Changestream Example 4 finally: done = True t.join() def test_aggregate_examples(self): - db = client_context.client.pymongo_test + db = self.db # Start Aggregation Example 1 - db.sales.aggregate([ - {"$match": {"items.fruit": "banana"}}, - {"$sort": {"date": 1}} - ]) + db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) # End Aggregation Example 1 # Start Aggregation Example 2 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$match": {"items.fruit": "banana"}}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "count": {"$sum": "$items.quantity"}} - }, - {"$project": { - "dayOfWeek": "$_id.day", - "numberSold": "$count", - "_id": 0} - }, - {"$sort": {"numberSold": 1}} - ]) + db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) # End Aggregation Example 2 # Start Aggregation Example 3 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "items_sold": {"$sum": "$items.quantity"}, - "revenue": { - "$sum": { - "$multiply": [ - "$items.quantity", "$items.price"] - } + db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}}, } - } - }, - {"$project": { - "day": "$_id.day", - "revenue": 1, - "items_sold": 1, - "discount": { - "$cond": { - "if": {"$lte": ["$revenue", 250]}, - "then": 25, - "else": 0 - } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, } - } - } - ]) + }, + ] + ) # End Aggregation Example 3 - # $lookup was new in 3.2. The let and pipeline options - # were added in 3.6. 
-        if client_context.version.at_least(3, 6, 0):
-            # Start Aggregation Example 4
-            db.air_alliances.aggregate([
-                {"$lookup": {
-                    "from": "air_airlines",
-                    "let": {"constituents": "$airlines"},
-                    "pipeline": [
-                        {"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}
-                    ],
-                    "as": "airlines"
+        # Start Aggregation Example 4
+        db.air_alliances.aggregate(
+            [
+                {
+                    "$lookup": {
+                        "from": "air_airlines",
+                        "let": {"constituents": "$airlines"},
+                        "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}],
+                        "as": "airlines",
                     }
                },
-                {"$project": {
-                    "_id": 0,
-                    "name": 1,
-                    "airlines": {
-                        "$filter": {
-                            "input": "$airlines",
-                            "as": "airline",
-                            "cond": {"$eq": ["$$airline.country", "Canada"]}
+                {
+                    "$project": {
+                        "_id": 0,
+                        "name": 1,
+                        "airlines": {
+                            "$filter": {
+                                "input": "$airlines",
+                                "as": "airline",
+                                "cond": {"$eq": ["$$airline.country", "Canada"]},
                            }
-                        }
+                        },
                    }
-                }
-            ])
-            # End Aggregation Example 4
+                },
+            ]
+        )
+        # End Aggregation Example 4

    def test_commands(self):
-        db = client_context.client.pymongo_test
+        db = self.db
        db.restaurants.insert_one({})

        # Start runCommand Example 1
@@ -800,11 +870,11 @@ def test_commands(self):
        # End runCommand Example 1

        # Start runCommand Example 2
-        db.command("collStats", "restaurants")
+        db.command("count", "restaurants")
        # End runCommand Example 2

    def test_index_management(self):
-        db = client_context.client.pymongo_test
+        db = self.db

        # Start Index Example 1
        db.records.create_index("score")
@@ -813,43 +883,31 @@
        # Start Index Example 2
        db.restaurants.create_index(
            [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)],
-            partialFilterExpression={"rating": {"$gt": 5}}
+            partialFilterExpression={"rating": {"$gt": 5}},
        )
        # End Index Example 2

-    @client_context.require_version_min(3, 6, 0)
    @client_context.require_replica_set
    def test_misc(self):
        # Marketing examples
-        client = client_context.client
+        client = self.client
        self.addCleanup(client.drop_database, "test")
        self.addCleanup(client.drop_database, "my_database")

        # 2. Tunable consistency controls
        collection = client.my_database.my_collection
        with client.start_session() as session:
-            collection.insert_one({'_id': 1}, session=session)
-            collection.update_one(
-                {'_id': 1}, {"$set": {"a": 1}}, session=session)
-            for doc in collection.find({}, session=session):
+            collection.insert_one({"_id": 1}, session=session)
+            collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session)
+            for _doc in collection.find({}, session=session):
                pass

        # 3. 
Exploiting the power of arrays collection = client.test.array_updates_test - collection.update_one( - {'_id': 1}, - {"$set": {"a.$[i].b": 2}}, - array_filters=[{"i.b": 0}]) + collection.update_one({"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}]) class TestTransactionExamples(IntegrationTest): - - @classmethod - @client_context.require_connection - def setUpClass(cls): - super(TestTransactionExamples, cls).setUpClass() - cls.client = rs_or_single_client(w="majority") - @client_context.require_transactions def test_transactions(self): # Transaction examples @@ -860,8 +918,7 @@ def test_transactions(self): employees = client.hr.employees events = client.reporting.events employees.insert_one({"employee": 3, "status": "Active"}) - events.insert_one( - {"employee": 3, "status": {"new": "Active", "old": None}}) + events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}}) # Start Transactions Intro Example 1 @@ -870,15 +927,14 @@ def update_employee_info(session): events_coll = session.client.reporting.events with session.start_transaction( - read_concern=ReadConcern("snapshot"), - write_concern=WriteConcern(w="majority")): + read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority") + ): employees_coll.update_one( - {"employee": 3}, {"$set": {"status": "Inactive"}}, - session=session) + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) events_coll.insert_one( - {"employee": 3, "status": { - "new": "Inactive", "old": "Active"}}, - session=session) + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) while True: try: @@ -888,22 +944,22 @@ def update_employee_info(session): break except (ConnectionFailure, OperationFailure) as exc: # Can retry commit - if exc.has_error_label( - "UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") raise + # End Transactions Intro Example 1 with client.start_session() as session: update_employee_info(session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") # Start Transactions Retry Example 1 def run_transaction_with_retry(txn_func, session): @@ -912,24 +968,24 @@ def run_transaction_with_retry(txn_func, session): txn_func(session) # performs transaction break except (ConnectionFailure, OperationFailure) as exc: - print("Transaction aborted. Caught exception during " - "transaction.") + print("Transaction aborted. 
Caught exception during transaction.") # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying" - "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise + # End Transactions Retry Example 1 with client.start_session() as session: run_transaction_with_retry(update_employee_info, session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") # Start Transactions Retry Example 2 def commit_with_retry(session): @@ -942,23 +998,21 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") raise + # End Transactions Retry Example 2 # Test commit_with_retry from the previous examples def _insert_employee_retry_commit(session): with session.start_transaction(): - employees.insert_one( - {"employee": 4, "status": "Active"}, - session=session) + employees.insert_one({"employee": 4, "status": "Active"}, session=session) events.insert_one( - {"employee": 4, "status": {"new": "Active", "old": None}}, - session=session) + {"employee": 4, "status": {"new": "Active", "old": None}}, session=session + ) commit_with_retry(session) @@ -966,8 +1020,9 @@ def _insert_employee_retry_commit(session): run_transaction_with_retry(_insert_employee_retry_commit, session) employee = employees.find_one({"employee": 4}) + assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Active') + self.assertEqual(employee["status"], "Active") # Start Transactions Retry Example 3 @@ -979,8 +1034,7 @@ def run_transaction_with_retry(txn_func, session): except (ConnectionFailure, OperationFailure) as exc: # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying " - "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise @@ -995,8 +1049,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -1009,16 +1062,16 @@ def update_employee_info(session): events_coll = session.client.reporting.events with session.start_transaction( - read_concern=ReadConcern("snapshot"), - write_concern=WriteConcern(w="majority"), - read_preference=ReadPreference.PRIMARY): + read_concern=ReadConcern("snapshot"), + write_concern=WriteConcern(w="majority"), + read_preference=ReadPreference.PRIMARY, + ): employees_coll.update_one( - {"employee": 3}, {"$set": {"status": "Inactive"}}, - session=session) + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) events_coll.insert_one( - {"employee": 3, "status": { - "new": "Inactive", "old": "Active"}}, - session=session) + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) 
            commit_with_retry(session)
@@ -1026,17 +1079,20 @@ def update_employee_info(session):
        with client.start_session() as session:
            try:
                run_transaction_with_retry(update_employee_info, session)
-            except Exception as exc:
+            except Exception:
                # Do something with error.
                raise
        # End Transactions Retry Example 3

        employee = employees.find_one({"employee": 3})
+        assert employee is not None
        self.assertIsNotNone(employee)
-        self.assertEqual(employee['status'], 'Inactive')
+        self.assertEqual(employee["status"], "Inactive")
+
+        def MongoClient(_):
+            return rs_client()
-        MongoClient = lambda _: rs_client()

        uriString = None

        # Start Transactions withTxn API Example 1
@@ -1049,11 +1105,9 @@ def update_employee_info(session):
        client = MongoClient(uriString)
        wc_majority = WriteConcern("majority", wtimeout=1000)

-        # Prereq: Create collections. CRUD operations in transactions must be on existing collections.
-        client.get_database(
-            "mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0})
-        client.get_database(
-            "mydb2", write_concern=wc_majority).bar.insert_one({'xyz': 0})
+        # Prereq: Create collections.
+        client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0})
+        client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0})

        # Step 1: Define the callback that specifies the sequence of operations to perform inside the transaction.
        def callback(session):
@@ -1061,60 +1115,276 @@ def callback(session):
            collection_two = session.client.mydb2.bar

            # Important: You must pass the session to the operations.
-            collection_one.insert_one({'abc': 1}, session=session)
-            collection_two.insert_one({'xyz': 999}, session=session)
+            collection_one.insert_one({"abc": 1}, session=session)
+            collection_two.insert_one({"xyz": 999}, session=session)

        # Step 2: Start a client session.
        with client.start_session() as session:
            # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). 
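+            # (with_transaction itself retries the callback on
+            # "TransientTransactionError" and retries the commit on
+            # "UnknownTransactionCommitResult", up to a 120-second limit, so the
+            # manual retry loops from the earlier examples are not needed here.)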
session.with_transaction( - callback, read_concern=ReadConcern('local'), + callback, + read_concern=ReadConcern("local"), write_concern=wc_majority, - read_preference=ReadPreference.PRIMARY) + read_preference=ReadPreference.PRIMARY, + ) # End Transactions withTxn API Example 1 class TestCausalConsistencyExamples(IntegrationTest): - @client_context.require_version_min(3, 6, 0) @client_context.require_secondaries_count(1) @client_context.require_no_mmap def test_causal_consistency(self): # Causal consistency examples client = self.client - self.addCleanup(client.drop_database, 'test') - client.test.drop_collection('items') - client.test.items.insert_one({ - 'sku': "111", 'name': 'Peanuts', - 'start':datetime.datetime.today()}) + self.addCleanup(client.drop_database, "test") + client.test.drop_collection("items") + client.test.items.insert_one( + {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()} + ) # Start Causal Consistency Example 1 with client.start_session(causal_consistency=True) as s1: current_date = datetime.datetime.today() items = client.get_database( - 'test', read_concern=ReadConcern('majority'), - write_concern=WriteConcern('majority', wtimeout=1000)).items + "test", + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items items.update_one( - {'sku': "111", 'end': None}, - {'$set': {'end': current_date}}, session=s1) + {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1 + ) items.insert_one( - {'sku': "nuts-111", 'name': "Pecans", - 'start': current_date}, session=s1) + {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1 + ) # End Causal Consistency Example 1 + assert s1.cluster_time is not None + assert s1.operation_time is not None + # Start Causal Consistency Example 2 with client.start_session(causal_consistency=True) as s2: s2.advance_cluster_time(s1.cluster_time) s2.advance_operation_time(s1.operation_time) items = client.get_database( - 'test', read_preference=ReadPreference.SECONDARY, - read_concern=ReadConcern('majority'), - write_concern=WriteConcern('majority', wtimeout=1000)).items - for item in items.find({'end': None}, session=s2): + "test", + read_preference=ReadPreference.SECONDARY, + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + for item in items.find({"end": None}, session=s2): print(item) # End Causal Consistency Example 2 +class TestVersionedApiExamples(IntegrationTest): + @client_context.require_version_min(4, 7) + def test_versioned_api(self): + # Versioned API examples + def MongoClient(_, server_api): + return rs_client(server_api=server_api, connect=False) + + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + + MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + MongoClient(uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + MongoClient(uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + @unittest.skip("PYTHON-3167 count has been added to API version 1") + @client_context.require_version_min(4, 7) + def test_versioned_api_migration(self): + # SERVER-58785 + if client_context.is_topology_type(["sharded"]) and not client_context.version.at_least( + 5, 0, 2 + 
): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = rs_client(server_api=ServerApi("1", strict=True)) + client.db.sales.drop() + + # Start Versioned API Example 5 + def strptime(s): + return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") + + client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) + # End Versioned API Example 5 + + with self.assertRaisesRegex( + OperationFailure, + "Provided apiStrict:true, but the command count is not in API Version 1", + ): + client.db.command("count", "sales", query={}) + # Start Versioned API Example 6 + # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} + # End Versioned API Example 6 + + # Start Versioned API Example 7 + client.db.sales.count_documents({}) + # End Versioned API Example 7 + + # Start Versioned API Example 8 + # 8 + # End Versioned API Example 8 + + +class TestSnapshotQueryExamples(IntegrationTest): + @client_context.require_version_min(5, 0) + def test_snapshot_query(self): + client = self.client + + if not client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addCleanup(client.drop_database, "pets") + db = client.pets + db.drop_collection("cats") + db.drop_collection("dogs") + db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True}) + db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True}) + wait_until(lambda: self.check_for_snapshot(db.cats), "success") + wait_until(lambda: self.check_for_snapshot(db.dogs), "success") + + # Start Snapshot Query Example 1 + + db = client.pets + with client.start_session(snapshot=True) as s: + adoptablePetsCount = db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], session=s + ).next()["adoptableCatsCount"] + + adoptablePetsCount += db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], session=s + ).next()["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addCleanup(client.drop_database, "retail") + db.drop_collection("sales") + + saleDate = datetime.datetime.now() + db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + wait_until(lambda: self.check_for_snapshot(db.sales), "success") + + # Start Snapshot Query Example 2 + db = client.retail + with client.start_session(snapshot=True) as s: + 
db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ).next()["totalDailySales"] + + # End Snapshot Query Example 2 + + def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + with self.client.start_session(snapshot=True) as s: + try: + if collection.find_one(session=s): + return True + return False + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + if __name__ == "__main__": unittest.main() diff --git a/test/test_fork.py b/test/test_fork.py new file mode 100644 index 0000000000..7b19e4cd8d --- /dev/null +++ b/test/test_fork.py @@ -0,0 +1,93 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that pymongo resets its own locks after a fork.""" +from __future__ import annotations + +import os +import sys +import unittest +from multiprocessing import Pipe + +sys.path[0:0] = [""] + +from test import IntegrationTest +from test.utils import is_greenthread_patched + +from bson.objectid import ObjectId + + +@unittest.skipIf( + not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" +) +@unittest.skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", +) +class TestFork(IntegrationTest): + def test_lock_client(self): + # Forks the client with some items locked. + # Parent => All locks should be as before the fork. + # Child => All locks should be reset. + with self.client._MongoClient__lock: + + def target(): + self.client.admin.command("ping") + + with self.fork(target): + pass + self.client.admin.command("ping") + + def test_lock_object_id(self): + # Forks the client with ObjectId's _inc_lock locked. + # Parent => _inc_lock should remain locked. + # Child => _inc_lock should be unlocked. + with ObjectId._inc_lock: + + def target(): + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) + + with self.fork(target): + pass + + def test_topology_reset(self): + # Tests that topologies are different from each other. + # Cannot use ID because virtual memory addresses may be the same. + # Cannot reinstantiate ObjectId in the topology settings. + # Relies on difference in PID when opened again. 
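+        # (Background: the Topology records the PID of the process that opened
+        # it, and the child re-opens the client's topology on its next
+        # operation, so the parent keeps the original _pid while the child
+        # records its own; comparing the two proves the reset happened.)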
+ parent_conn, child_conn = Pipe() + init_id = self.client._topology._pid + parent_cursor_exc = self.client._kill_cursors_executor + + def target(): + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) + ) + + with self.fork(target): + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 2de173cab9..344a248b45 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,36 +13,42 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the grid_file module. -""" +"""Tests for the grid_file module.""" +from __future__ import annotations import datetime +import io import sys import zipfile +from io import BytesIO + +from pymongo.database import Database + sys.path[0:0] = [""] +from test import IntegrationTest, qcheck, unittest +from test.utils import EventListener, rs_or_single_client + from bson.objectid import ObjectId -from bson.py3compat import StringIO from gridfs import GridFS -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, - _SEEK_CUR, - _SEEK_END, - GridIn, - GridOut, - GridOutCursor) from gridfs.errors import NoFile +from gridfs.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, +) from pymongo import MongoClient from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress -from test import (IntegrationTest, - unittest, - qcheck) -from test.utils import rs_or_single_client, EventListener class TestGridFileNoConnect(unittest.TestCase): - """Test GridFile features on a client that does not connect. 
- """ + """Test GridFile features on a client that does not connect.""" + + db: Database @classmethod def setUpClass(cls): @@ -52,9 +57,17 @@ def setUpClass(cls): def test_grid_in_custom_opts(self): self.assertRaises(TypeError, GridIn, "foo") - a = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + a = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) self.assertEqual(5, a._id) self.assertEqual("my_file", a.filename) @@ -67,18 +80,15 @@ def test_grid_in_custom_opts(self): self.assertEqual("hello", a.baz) self.assertRaises(AttributeError, getattr, a, "mike") - b = GridIn(self.db.fs, - content_type="text/html", chunk_size=1000, baz=100) + b = GridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) self.assertEqual("text/html", b.content_type) self.assertEqual(1000, b.chunk_size) self.assertEqual(100, b.baz) class TestGridFile(IntegrationTest): - def setUp(self): - self.db.drop_collection('fs.files') - self.db.drop_collection('fs.chunks') + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) def test_basic(self): f = GridIn(self.db.fs, filename="test") @@ -109,7 +119,7 @@ def test_md5(self): f = GridIn(self.db.fs) f.write(b"hello world\n") f.close() - self.assertEqual("6f5902ac237024bdd0c176cb93063dc4", f.md5) + self.assertEqual(None, f.md5) def test_alternate_collection(self): self.db.alt.files.delete_many({}) @@ -125,9 +135,6 @@ def test_alternate_collection(self): g = GridOut(self.db.alt, f._id) self.assertEqual(b"hello world", g.read()) - # test that md5 still works... - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", g.md5) - def test_grid_in_default_opts(self): self.assertRaises(TypeError, GridIn, "foo") @@ -191,7 +198,7 @@ def test_grid_in_default_opts(self): self.assertEqual({"foo": 1}, a.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5) + self.assertEqual(None, a.md5) self.assertRaises(AttributeError, setattr, a, "md5", 5) # Make sure custom attributes that were set both before and after @@ -222,32 +229,50 @@ def test_grid_out_default_opts(self): self.assertTrue(isinstance(b.upload_date, datetime.datetime)) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", b.md5) - - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + self.assertEqual(None, b.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, b, attr, 5) def test_grid_out_cursor_options(self): - self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {}, - projection={"filename": 1}) + self.assertRaises( + TypeError, GridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) cursor = GridOutCursor(self.db.fs, {}) cursor_clone = cursor.clone() - + cursor_dict = cursor.__dict__.copy() - cursor_dict.pop('_Cursor__session') + cursor_dict.pop("_Cursor__session") cursor_clone_dict = cursor_clone.__dict__.copy() - cursor_clone_dict.pop('_Cursor__session') - self.assertEqual(cursor_dict, cursor_clone_dict) + cursor_clone_dict.pop("_Cursor__session") + self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) 
self.assertRaises(NotImplementedError, cursor.remove_option, 0) def test_grid_out_custom_opts(self): - one = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + one = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -263,10 +288,19 @@ def test_grid_out_custom_opts(self): self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5) - - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) def test_grid_out_file_document(self): @@ -277,8 +311,7 @@ def test_grid_out_file_document(self): two = GridOut(self.db.fs, file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", two.read()) - three = GridOut(self.db.fs, 5, - file_document=self.db.fs.files.find_one()) + three = GridOut(self.db.fs, 5, file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", three.read()) four = GridOut(self.db.fs, file_document={}) @@ -301,12 +334,11 @@ def test_write_file_like(self): five = GridIn(self.db.fs, chunk_size=2) five.write(b"hello") - buffer = StringIO(b" world") + buffer = BytesIO(b" world") five.write(buffer) five.write(b" and mongodb") five.close() - self.assertEqual(b"hello world and mongodb", - GridOut(self.db.fs, five._id).read()) + self.assertEqual(b"hello world and mongodb", GridOut(self.db.fs, five._id).read()) def test_write_lines(self): a = GridIn(self.db.fs) @@ -321,8 +353,22 @@ def test_close(self): self.assertRaises(ValueError, f.write, "test") f.close() + def test_closed(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write(b"Hello world.\nHow are you?") + f.close() + + g = GridOut(self.db.fs, f._id) + self.assertFalse(g.closed) + g.read(1) + self.assertFalse(g.closed) + g.read(100) + self.assertFalse(g.closed) + g.close() + self.assertTrue(g.closed) + def test_multi_chunk_file(self): - random_string = b'a' * (DEFAULT_CHUNK_SIZE + 1000) + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) f = GridIn(self.db.fs) f.write(random_string) @@ -356,8 +402,7 @@ def helper(data): self.assertEqual(data, g.read(10) + g.read(10)) return True - qcheck.check_unittest(self, helper, - qcheck.gen_string(qcheck.gen_range(0, 20))) + qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) def test_seek(self): f = GridIn(self.db.fs, chunkSize=3) @@ -415,10 +460,12 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) - f.write((b"""Hello world, + f.write( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) f.close() # Try read(), then readline(). @@ -445,6 +492,60 @@ def test_readline(self): self.assertEqual(b"e", g.readline(1)) self.assertEqual(b"llo world,\n", g.readline()) + def test_readlines(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + f.close() + + # Try read(), then readlines(). 
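+ # (GridOut follows io.IOBase semantics here: readlines(hint) returns
+ # whole lines and stops only once the running byte total exceeds the
+ # hint, so readlines(11) below yields the 11-byte b"llo world,\n"
+ # plus one more line.)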
+ g = GridOut(self.db.fs, f._id) + self.assertEqual(b"He", g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines()) + self.assertEqual([], g.readlines()) + + # Try readline(), then readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], g.readlines(13)) + self.assertEqual(b"Bye", g.readline()) + self.assertEqual([], g.readlines()) + + # Only readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines() + ) + + g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines(0) + ) + + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual([b"How are you?\n"], g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines(18)) + + # Try readlines() first, then read(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"H", g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], g.readlines(29)) + self.assertEqual([b"Bye"], g.readlines(1)) + + # Try readlines() first, then readline(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"How are you?\n", g.readline()) + self.assertEqual([b"Hope all is well.\n"], g.readlines(17)) + self.assertEqual(b"Bye", g.readline()) + def test_iterator(self): f = GridIn(self.db.fs) f.close() @@ -452,30 +553,35 @@ def test_iterator(self): self.assertEqual([], list(g)) f = GridIn(self.db.fs) - f.write(b"hello world") + f.write(b"hello world\nhere are\nsome lines.") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world"], list(g)) - self.assertEqual(b"hello", g.read(5)) - self.assertEqual([b"hello world"], list(g)) - self.assertEqual(b" worl", g.read(5)) + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + self.assertEqual(b"", g.read(5)) + self.assertEqual([], list(g)) + + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", next(iter(g))) + self.assertEqual(b"here", g.read(4)) + self.assertEqual(b" are\n", next(iter(g))) + self.assertEqual(b"some lines", g.read(10)) + self.assertEqual(b".", next(iter(g))) + self.assertRaises(StopIteration, iter(g).__next__) f = GridIn(self.db.fs, chunk_size=2) f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"he", b"ll", b"o ", - b"wo", b"rl", b"d"], list(g)) + self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): - in_data = (b"This is a text that doesn't " - b"quite fit in a single 16-byte chunk.") + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." 
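+ # (The data is stored in 16-byte chunks but read back 13 bytes at a
+ # time, so most reads below straddle a chunk boundary.)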
f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() g = GridOut(self.db.fs, f._id) - out_data = b'' + out_data = b"" while 1: s = g.read(13) if not s: @@ -485,7 +591,7 @@ def test_read_unaligned_buffer_size(self): self.assertEqual(in_data, out_data) def test_readchunk(self): - in_data = b'a' * 10 + in_data = b"a" * 10 f = GridIn(self.db.fs, chunkSize=3) f.write(in_data) f.close() @@ -504,21 +610,21 @@ def test_readchunk(self): def test_write_unicode(self): f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, u"foo") + self.assertRaises(TypeError, f.write, "foo") f = GridIn(self.db.fs, encoding="utf-8") - f.write(u"foo") + f.write("foo") f.close() g = GridOut(self.db.fs, f._id) self.assertEqual(b"foo", g.read()) f = GridIn(self.db.fs, encoding="iso-8859-1") - f.write(u"aé") + f.write("aé") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(u"aé".encode("iso-8859-1"), g.read()) + self.assertEqual("aé".encode("iso-8859-1"), g.read()) def test_set_after_close(self): f = GridIn(self.db.fs, _id="foo", bar="baz") @@ -564,14 +670,29 @@ def test_context_manager(self): with GridOut(self.db.fs, infile._id) as outfile: self.assertEqual(contents, outfile.read()) - def test_prechunked_string(self): + def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + with GridIn(self.db.fs, filename="important") as infile: + infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + self.assertIsNone(self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + + def test_prechunked_string(self): def write_me(s, chunk_size): - buf = StringIO(s) + buf = BytesIO(s) infile = GridIn(self.db.fs) while True: to_write = buf.read(chunk_size) - if to_write == b'': + if to_write == b"": break infile.write(to_write) infile.close() @@ -581,7 +702,7 @@ def write_me(s, chunk_size): data = outfile.read() self.assertEqual(s, data) - s = b'x' * DEFAULT_CHUNK_SIZE * 4 + s = b"x" * DEFAULT_CHUNK_SIZE * 4 # Test with default chunk size write_me(s, DEFAULT_CHUNK_SIZE) # Multiple @@ -593,7 +714,7 @@ def test_grid_out_lazy_connect(self): fs = self.db.fs outfile = GridOut(fs, file_id=-1) self.assertRaises(NoFile, outfile.read) - self.assertRaises(NoFile, getattr, outfile, 'filename') + self.assertRaises(NoFile, getattr, outfile, "filename") infile = GridIn(fs, filename=1) infile.close() @@ -606,11 +727,10 @@ def test_grid_out_lazy_connect(self): outfile.readchunk() def test_grid_in_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=10) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) - self.assertRaises(ServerSelectionTimeoutError, infile.write, b'data') + self.assertRaises(ServerSelectionTimeoutError, infile.write, b"data") self.assertRaises(ServerSelectionTimeoutError, infile.close) def test_unacknowledged(self): @@ -622,7 +742,7 @@ def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. # Use 102 batches to cause a single getMore. 
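# (With chunk_size 1024 the data below spans 102 chunk documents: the
# initial find batch returns 101 of them and the last one arrives via
# the getMore that this test exercises.)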
chunk_size = 1024 - data = b'd' * (102 * chunk_size) + data = b"d" * (102 * chunk_size) listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test @@ -635,9 +755,11 @@ def test_survive_cursor_not_found(self): # Kill the cursor to simulate the cursor timing out on the server # when an application spends a long time between two calls to # readchunk(). + assert client.address is not None client._close_cursor_now( outfile._GridOut__chunk_iter._cursor.cursor_id, - _CursorAddress(client.address, db.fs.chunks.full_name)) + _CursorAddress(client.address, db.fs.chunks.full_name), + ) # Read the rest of the file without error. self.assertEqual(len(outfile.read()), len(data) - chunk_size) @@ -646,7 +768,7 @@ def test_survive_cursor_not_found(self): self.assertIn("getMore", listener.started_command_names()) def test_zip(self): - zf = StringIO() + zf = BytesIO() z = zipfile.ZipFile(zf, "w") z.writestr("test.txt", b"hello world") z.close() @@ -663,6 +785,21 @@ def test_zip(self): self.assertSequenceEqual(z.namelist(), ["test.txt"]) self.assertEqual(z.read("test.txt"), b"hello world") + def test_grid_out_unsupported_operations(self): + f = GridIn(self.db.fs, chunkSize=3) + f.write(b"hello world") + f.close() + + g = GridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) + if __name__ == "__main__": unittest.main() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 82422f968f..f94736708e 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,43 +13,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. 
-""" -import sys -sys.path[0:0] = [""] +"""Tests for the gridfs package.""" +from __future__ import annotations import datetime +import sys import threading import time -import gridfs +from io import BytesIO + +sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client + +import gridfs from bson.binary import Binary -from bson.py3compat import StringIO, string_type +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file import GridOutCursor +from pymongo.database import Database +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference -from gridfs.errors import CorruptGridFile, FileExists, NoFile -from test.test_replica_set_client import TestReplicaSetClientBase -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (ignore_deprecations, - joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, fs, n): threading.Thread.__init__(self) self.fs = fs self.n = n - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -60,13 +56,12 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, fs, n, results): threading.Thread.__init__(self) self.fs = fs self.n = n self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -77,6 +72,7 @@ def run(self): class TestGridfsNoConnect(unittest.TestCase): + db: Database @classmethod def setUpClass(cls): @@ -88,18 +84,19 @@ def test_gridfs(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFS + alt: gridfs.GridFS @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFS(cls.db) cls.alt = gridfs.GridFS(cls.db, "alt") def setUp(self): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("alt.files") - self.db.drop_collection("alt.chunks") + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): oid = self.fs.put(b"hello world") @@ -143,8 +140,7 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.fs.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") @@ -153,16 +149,16 @@ def test_empty_file(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], string_type)) + self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.put(b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.put(b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.get(files_id) 
self.assertRaises(CorruptGridFile, out.read) @@ -173,20 +169,25 @@ def test_corrupt_chunk(self): self.fs.delete(files_id) def test_put_ensures_index(self): - # setUp has dropped collections. - names = self.db.list_collection_names() - self.assertFalse([name for name in names if name.startswith('fs')]) - chunks = self.db.fs.chunks files = self.db.fs.files + # Ensure the collections are removed. + chunks.drop() + files.drop() self.fs.put(b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) def test_alt_collection(self): oid = self.alt.put(b"hello world") @@ -208,24 +209,20 @@ def test_alt_collection(self): self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.alt.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -239,10 +236,7 @@ def test_threaded_writes(self): self.assertEqual(f.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.put(b"foo", filename="test") @@ -312,11 +306,21 @@ def test_get_version_with_metadata(self): time.sleep(0.01) three = self.fs.put(b"baz", filename="test", author="author2") - self.assertEqual(b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read()) - self.assertEqual(b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read()) - self.assertEqual(b"foo", self.fs.get_version(filename="test", author="author1", version=0).read()) - self.assertEqual(b"bar", self.fs.get_version(filename="test", author="author1", version=1).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", author="author2", version=0).read()) + self.assertEqual( + b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read() + ) + self.assertEqual( + b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read() + ) + self.assertEqual( + b"foo", self.fs.get_version(filename="test", author="author1", version=0).read() + ) + self.assertEqual( + b"bar", self.fs.get_version(filename="test", author="author1", version=1).read() + ) + self.assertEqual( + b"baz", self.fs.get_version(filename="test", author="author2", version=0).read() + ) self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) @@ -328,7 +332,7 @@ def test_get_version_with_metadata(self): self.fs.delete(three) def test_put_filelike(self): - oid = 
self.fs.put(StringIO(b"hello world"), chunk_size=1) + oid = self.fs.put(BytesIO(b"hello world"), chunk_size=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) self.assertEqual(b"hello world", self.fs.get(oid).read()) @@ -341,7 +345,7 @@ def test_file_exists(self): one.close() two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b'x' * 262146) + self.assertRaises(FileExists, two.write, b"x" * 262146) def test_exists(self): oid = self.fs.put(b"hello") @@ -369,34 +373,34 @@ def test_exists(self): self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, u"hello") + self.assertRaises(TypeError, self.fs.put, "hello") - oid = self.fs.put(u"hello", encoding="utf-8") + oid = self.fs.put("hello", encoding="utf-8") self.assertEqual(b"hello", self.fs.get(oid).read()) self.assertEqual("utf-8", self.fs.get(oid).encoding) - oid = self.fs.put(u"aé", encoding="iso-8859-1") - self.assertEqual(u"aé".encode("iso-8859-1"), self.fs.get(oid).read()) + oid = self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), self.fs.get(oid).read()) self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.put(b"", filename="empty") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) f = self.fs.get_last_version(filename="empty") def iterate_file(grid_file): - for chunk in grid_file: + for _chunk in grid_file: pass return True self.assertTrue(iterate_file(f)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=10) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) self.assertRaises(ServerSelectionTimeoutError, gfs.list) @@ -405,7 +409,6 @@ def test_gridfs_lazy_connect(self): f = fs.new_file() self.assertRaises(ServerSelectionTimeoutError, f.close) - @ignore_deprecations def test_gridfs_find(self): self.fs.put(b"test2", filename="two") time.sleep(0.01) @@ -414,10 +417,10 @@ def test_gridfs_find(self): self.fs.put(b"test1", filename="one") time.sleep(0.01) self.fs.put(b"test2++", filename="two") - self.assertEqual(3, self.fs.find({"filename": "two"}).count()) - self.assertEqual(4, self.fs.find().count()) - cursor = self.fs.find( - no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) gout = next(cursor) self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -429,29 +432,45 @@ def test_gridfs_find(self): cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. 
+ cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + cursor.__del__() # no error + def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) - id1 = self.fs.put(b'test1', filename='file1') - self.assertEqual(b'test1', self.fs.find_one().read()) + id1 = self.fs.put(b"test1", filename="file1") + res = self.fs.find_one() + assert res is not None + self.assertEqual(b"test1", res.read()) - id2 = self.fs.put(b'test2', filename='file2', meta='data') - self.assertEqual(b'test1', self.fs.find_one(id1).read()) - self.assertEqual(b'test2', self.fs.find_one(id2).read()) + id2 = self.fs.put(b"test2", filename="file2", meta="data") + res1 = self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b"test1", res1.read()) + res2 = self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b"test2", res2.read()) - self.assertEqual(b'test1', - self.fs.find_one({'filename': 'file1'}).read()) + res3 = self.fs.find_one({"filename": "file1"}) + assert res3 is not None + self.assertEqual(b"test1", res3.read()) - self.assertEqual('data', self.fs.find_one(id2).meta) + res4 = self.fs.find_one(id2) + assert res4 is not None + self.assertEqual("data", res4.meta) def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.put(data, filename='f') - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.put(data, filename="f") + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.get_version('f').read()) + self.assertEqual(data, self.fs.get_version("f").read()) def test_unacknowledged(self): # w=0 is prohibited. 
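The test_delete_not_initialized case above leans on a CPython behavior that is easy to miss: as long as __new__ succeeded, __del__ runs even when __init__ raised, so a destructor cannot assume the instance was fully initialized. A minimal standalone sketch of that behavior (plain Python, no MongoDB required):

    class Fragile:
        def __init__(self):
            raise TypeError("bad arguments")

        def __del__(self):
            # Must not touch attributes that __init__ would have set.
            print("__del__ ran on a partially initialized instance")

    try:
        Fragile()
    except TypeError:
        pass  # The half-built instance is collected and __del__ still runs.

This is exactly what the cursor.__del__() call in that test verifies for GridOutCursor.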
@@ -460,21 +479,6 @@ def test_unacknowledged(self): def test_md5(self): gin = self.fs.new_file() - gin.write(b"includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.get(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - _id = self.fs.put(b"also includes md5 sum") - gout = self.fs.get(_id) - self.assertIsNotNone(gout.md5) - - fs = gridfs.GridFS(self.db, disable_md5=True) - gin = fs.new_file() gin.write(b"no md5 sum") gin.close() self.assertIsNone(gin.md5) @@ -482,68 +486,60 @@ def test_md5(self): gout = self.fs.get(gin._id) self.assertIsNone(gout.md5) - _id = fs.put(b"still no md5 sum") + _id = self.fs.put(b"still no md5 sum") gout = self.fs.get(_id) self.assertIsNone(gout.md5) -class TestGridfsReplicaSet(TestReplicaSetClientBase): - +class TestGridfsReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsreplica') + client_context.client.drop_database("gfsreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=self.w, - read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - fs = gridfs.GridFS(rsc.gfsreplica, 'gfsreplicatest') + fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") gin = fs.new_file() self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) - oid = fs.put(b'foo') + oid = fs.put(b"foo") content = fs.get(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): - primary_host, primary_port = self.primary - primary_connection = single_client(primary_host, primary_port) - - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest') + fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") # This won't detect secondary, raises error - self.assertRaises(ConnectionFailure, fs.put, b'foo') + self.assertRaises(NotPrimaryError, fs.put, b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - fs = gridfs.GridFS(client.gfsreplica, 'gfssecondarylazytest') + fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. 
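# (The read path connects to the secondary and merely finds no file;
# only the put() below requires a primary and raises NotPrimaryError.)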
self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(ConnectionFailure, fs.put, 'data') + self.assertRaises(NotPrimaryError, fs.put, "data", encoding="utf-8") if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 9addb84309..53e5cad54e 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2015-present MongoDB, Inc. # @@ -14,42 +13,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. -""" +"""Tests for the gridfs package.""" +from __future__ import annotations + import datetime +import itertools import threading import time +from io import BytesIO +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client import gridfs - from bson.binary import Binary +from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.py3compat import StringIO, string_type -from gridfs.errors import NoFile, CorruptGridFile -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - ServerSelectionTimeoutError) +from bson.son import SON +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference -from test import (client_context, - unittest, - IntegrationTest) -from test.test_replica_set_client import TestReplicaSetClientBase -from test.utils import (ignore_deprecations, - joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, gfs, num): threading.Thread.__init__(self) self.gfs = gfs self.num = num - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -59,13 +54,12 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, gfs, num, results): threading.Thread.__init__(self) self.gfs = gfs self.num = num self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -76,25 +70,23 @@ def run(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFSBucket + alt: gridfs.GridFSBucket @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFSBucket(cls.db) - cls.alt = gridfs.GridFSBucket( - cls.db, bucket_name="alt") + cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") def setUp(self): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("alt.files") - self.db.drop_collection("alt.chunks") + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): - oid = self.fs.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + oid = self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) @@ -104,13 +96,10 @@ def test_basic(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) def test_multi_chunk_delete(self): - self.db.fs.drop() self.assertEqual(0, 
self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) gfs = gridfs.GridFSBucket(self.db) - oid = gfs.upload_from_stream("test_filename", - b"hello", - chunk_size_bytes=1) + oid = gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(5, self.db.fs.chunks.count_documents({})) gfs.delete(oid) @@ -118,24 +107,22 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) def test_empty_file(self): - oid = self.fs.upload_from_stream("test_filename", - b"") + oid = self.fs.upload_from_stream("test_filename", b"") self.assertEqual(b"", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], string_type)) + self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.upload_from_stream("test_filename", - b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.upload_from_stream("test_filename", b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.open_download_stream(files_id) self.assertRaises(CorruptGridFile, out.read) @@ -146,26 +133,52 @@ def test_corrupt_chunk(self): self.fs.delete(files_id) def test_upload_ensures_index(self): - # setUp has dropped collections. - names = self.db.list_collection_names() - self.assertFalse([name for name in names if name.startswith('fs')]) - chunks = self.db.fs.chunks files = self.db.fs.files + # Ensure the collections are removed. + chunks.drop() + files.drop() self.fs.upload_from_stream("filename", b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) + + def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [("filename", i), ("uploadDate", j)] + self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) + + # No error. 
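+ # (The mongo shell sends index directions as doubles, so an existing
+ # {filename: 1.0, uploadDate: 1.0} index must satisfy the driver's
+ # check for [("filename", 1), ("uploadDate", 1)].)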
+ self.fs.upload_from_stream("filename", b"data") + + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) + files.drop() def test_alt_collection(self): - oid = self.alt.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream(oid).read()) + oid = self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) @@ -175,34 +188,30 @@ def test_alt_collection(self): self.assertEqual(0, self.db.alt.chunks.count_documents({})) self.assertRaises(NoFile, self.alt.open_download_stream, "foo") - self.alt.upload_from_stream("foo", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream_by_name("foo").read()) + self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read()) self.alt.upload_from_stream("mike", b"") self.alt.upload_from_stream("test", b"foo") self.alt.upload_from_stream("hello world", b"") - self.assertEqual(set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list( - self.db.alt.files.find()))) + self.assertEqual( + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in list(self.db.alt.files.find())}, + ) def test_threaded_reads(self): self.fs.upload_from_stream("test", b"hello") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -216,10 +225,7 @@ def test_threaded_writes(self): self.assertEqual(fstr.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.upload_from_stream("test", b"foo") @@ -231,17 +237,13 @@ def test_get_last_version(self): two = two._id three = self.fs.upload_from_stream("test", b"baz") - self.assertEqual(b"baz", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(three) - self.assertEqual(b"bar", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(two) - self.assertEqual(b"foo", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(one) - self.assertRaises(NoFile, - self.fs.open_download_stream_by_name, "test") + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test") def test_get_version(self): self.fs.upload_from_stream("test", b"foo") @@ -251,61 +253,47 @@ def test_get_version(self): self.fs.upload_from_stream("test", b"baz") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=0).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=1).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name( - "test", 
revision=2).read()) - - self.assertEqual(b"baz", self.fs.open_download_stream_by_name( - "test", revision=-1).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=-2).read()) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=-3).read()) - - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=3) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=-4) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read()) + + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=-2).read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read()) + + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4) def test_upload_from_stream(self): - oid = self.fs.upload_from_stream("test_file", - StringIO(b"hello world"), - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) def test_upload_from_stream_with_id(self): oid = ObjectId() - self.fs.upload_from_stream_with_id(oid, - "test_file_custom_id", - StringIO(b"custom id"), - chunk_size_bytes=1) - self.assertEqual(b"custom id", - self.fs.open_download_stream(oid).read()) + self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") gin.close() - self.assertEqual(b"from stream", - self.fs.open_download_stream(gin._id).read()) + self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read()) def test_open_upload_stream_with_id(self): oid = ObjectId() gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") gin.write(b"from stream with custom id") gin.close() - self.assertEqual(b"from stream with custom id", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read()) def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.upload_from_stream("empty", b"") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) fstr = self.fs.open_download_stream_by_name("empty") @@ -318,18 +306,16 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(fstr)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=0) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) gfs = gridfs.GridFSBucket(cdb) self.assertRaises( - ServerSelectionTimeoutError, - 
gfs.upload_from_stream, "test", b"") # Still no connection. + ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"" + ) # Still no connection. - @ignore_deprecations def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") time.sleep(0.01) @@ -338,11 +324,12 @@ def test_gridfs_find(self): self.fs.upload_from_stream("one", b"test1") time.sleep(0.01) self.fs.upload_from_stream("two", b"test2++") - self.assertEqual(3, self.fs.find({"filename": "two"}).count()) - self.assertEqual(4, self.fs.find({}).count()) + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) cursor = self.fs.find( - {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], - skip=1, limit=2) + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) gout = next(cursor) self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -356,13 +343,11 @@ def test_gridfs_find(self): def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.upload_from_stream('f', data) - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.upload_from_stream("f", data) + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, - self.fs.open_download_stream_by_name('f').read()) + self.assertEqual(data, self.fs.open_download_stream_by_name("f").read()) def test_unacknowledged(self): # w=0 is prohibited. @@ -370,36 +355,30 @@ def test_unacknowledged(self): gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test) def test_rename(self): - _id = self.fs.upload_from_stream("first_name", b'testing') - self.assertEqual(b'testing', self.fs.open_download_stream_by_name( - "first_name").read()) + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read()) self.fs.rename(_id, "second_name") - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "first_name") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name( - "second_name").read()) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) def test_abort(self): - gin = self.fs.open_upload_stream("test_filename", - chunk_size_bytes=5) + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") gin.write(b"test2") gin.write(b"test3") - self.assertEqual(3, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() self.assertTrue(gin.closed) self.assertRaises(ValueError, gin.write, b"test4") - self.assertEqual(0, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_download_to_stream(self): - file1 = StringIO(b"hello world") + file1 = BytesIO(b"hello world") # Test with one chunk. 
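# ("hello world" is 11 bytes, far below the default 255 * 1024-byte
# chunk size, so exactly one chunk document is created.)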
oid = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream(oid, file2) file1.seek(0) file2.seek(0) @@ -409,22 +388,20 @@ def test_download_to_stream(self): self.db.drop_collection("fs.files") self.db.drop_collection("fs.chunks") file1.seek(0) - oid = self.fs.upload_from_stream("many_chunks", - file1, - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream(oid, file2) file1.seek(0) file2.seek(0) self.assertEqual(file1.read(), file2.read()) def test_download_to_stream_by_name(self): - file1 = StringIO(b"hello world") + file1 = BytesIO(b"hello world") # Test with one chunk. - oid = self.fs.upload_from_stream("one_chunk", file1) + _ = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream_by_name("one_chunk", file2) file1.seek(0) file2.seek(0) @@ -437,109 +414,75 @@ def test_download_to_stream_by_name(self): self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream_by_name("many_chunks", file2) file1.seek(0) file2.seek(0) self.assertEqual(file1.read(), file2.read()) def test_md5(self): - gin = self.fs.open_upload_stream("has md5") - gin.write(b"includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.open_download_stream(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - gin = self.fs.open_upload_stream_with_id(ObjectId(), "also has md5") - gin.write(b"also includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.open_download_stream(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - fs = gridfs.GridFSBucket(self.db, disable_md5=True) - gin = fs.open_upload_stream("no md5") + gin = self.fs.open_upload_stream("no md5") gin.write(b"no md5 sum") gin.close() self.assertIsNone(gin.md5) - gout = fs.open_download_stream(gin._id) + gout = self.fs.open_download_stream(gin._id) self.assertIsNone(gout.md5) - gin = fs.open_upload_stream_with_id(ObjectId(), "also no md5") + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") gin.write(b"also no md5 sum") gin.close() self.assertIsNone(gin.md5) - gout = fs.open_download_stream(gin._id) + gout = self.fs.open_download_stream(gin._id) self.assertIsNone(gout.md5) -class TestGridfsBucketReplicaSet(TestReplicaSetClientBase): - +class TestGridfsBucketReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsBucketReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsbucketreplica') + client_context.client.drop_database("gfsbucketreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=self.w, - read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 'gfsbucketreplicatest') - oid = gfs.upload_from_stream("test_filename", b'foo') + gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 
"gfsbucketreplicatest") + oid = gfs.upload_from_stream("test_filename", b"foo") content = gfs.open_download_stream(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): - primary_host, primary_port = self.primary - primary_connection = single_client(primary_host, primary_port) - - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - gfs = gridfs.GridFSBucket( - secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest') + gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") # This won't detect secondary, raises error - self.assertRaises(ConnectionFailure, gfs.upload_from_stream, - "test_filename", b'foo') + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - gfs = gridfs.GridFSBucket( - client.gfsbucketreplica, 'gfsbucketsecondarylazytest') + gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, gfs.open_download_stream_by_name, - "test_filename") - self.assertRaises(ConnectionFailure, gfs.upload_from_stream, - "test_filename", b'data') + self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data") if __name__ == "__main__": diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index bda6f52816..6840b6ae0c 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -1,4 +1,4 @@ -# Copyright 2015 MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,222 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test GridFSBucket class.""" -import copy -import datetime +"""Test the GridFS unified spec tests.""" +from __future__ import annotations + import os import sys -import re - -from json import loads - -import gridfs sys.path[0:0] = [""] -from bson import Binary -from bson.int64 import Int64 -from bson.json_util import object_hook -from bson.py3compat import bytes_from_hex -from gridfs.errors import NoFile, CorruptGridFile -from test import (unittest, - IntegrationTest) - -# Commands. 
-_COMMANDS = {"delete": lambda coll, doc: [coll.delete_many(d["q"]) - for d in doc['deletes']], - "insert": lambda coll, doc: coll.insert_many(doc['documents']), - "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) - for u in doc['updates']] - } +from test import unittest +from test.unified_format import generate_test_classes # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'gridfs') - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. Special case for _id. - if camel == "id": - return "file_id" - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() - - -class TestAllScenarios(IntegrationTest): - @classmethod - def setUpClass(cls): - super(TestAllScenarios, cls).setUpClass() - cls.fs = gridfs.GridFSBucket(cls.db) - cls.str_to_cmd = { - "upload": cls.fs.upload_from_stream, - "download": cls.fs.open_download_stream, - "delete": cls.fs.delete, - "download_by_name": cls.fs.open_download_stream_by_name} - - def init_db(self, data, test): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("expected.files") - self.db.drop_collection("expected.chunks") - - # Read in data. - if data['files']: - self.db.fs.files.insert_many(data['files']) - self.db.expected.files.insert_many(data['files']) - if data['chunks']: - self.db.fs.chunks.insert_many(data['chunks']) - self.db.expected.chunks.insert_many(data['chunks']) - - # Make initial modifications. - if "arrange" in test: - for cmd in test['arrange'].get('data', []): - for key in cmd.keys(): - if key in _COMMANDS: - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - def init_expected_db(self, test, result): - # Modify outcome DB. - for cmd in test['assert'].get('data', []): - for key in cmd.keys(): - if key in _COMMANDS: - # Replace wildcards in inserts. - for doc in cmd.get('documents', []): - keylist = doc.keys() - for dockey in copy.deepcopy(list(keylist)): - if "result" in str(doc[dockey]): - doc[dockey] = result - if "actual" in str(doc[dockey]): # Avoid duplicate - doc.pop(dockey) - # Move contentType to metadata. - if dockey == "contentType": - doc["metadata"] = {dockey: doc.pop(dockey)} - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - if test['assert'].get('result') == "&result": - test['assert']['result'] = result - - def sorted_list(self, coll, ignore_id): - to_sort = [] - for doc in coll.find(): - docstr = "{" - if ignore_id: # Cannot compare _id in chunks collection. - doc.pop("_id") - for k in sorted(doc.keys()): - if k == "uploadDate": # Can't compare datetime. - self.assertTrue(isinstance(doc[k], datetime.datetime)) - else: - docstr += "%s:%s " % (k, repr(doc[k])) - to_sort.append(docstr + "}") - return to_sort - - -def create_test(scenario_def): - def run_scenario(self): - - # Run tests. - self.assertTrue(scenario_def['tests'], "tests cannot be empty") - for test in scenario_def['tests']: - self.init_db(scenario_def['data'], test) - - # Run GridFs Operation. 
- operation = self.str_to_cmd[test['act']['operation']] - args = test['act']['arguments'] - extra_opts = args.pop("options", {}) - if "contentType" in extra_opts: - extra_opts["metadata"] = { - "contentType": extra_opts.pop("contentType")} - - args.update(extra_opts) - - converted_args = dict((camel_to_snake(c), v) - for c, v in args.items()) - - expect_error = test['assert'].get("error", False) - result = None - error = None - try: - result = operation(**converted_args) - - if 'download' in test['act']['operation']: - result = Binary(result.read()) - except Exception as exc: - if not expect_error: - raise - error = exc - - self.init_expected_db(test, result) - - # Asserts. - errors = {"FileNotFound": NoFile, - "ChunkIsMissing": CorruptGridFile, - "ExtraChunk": CorruptGridFile, - "ChunkIsWrongSize": CorruptGridFile, - "RevisionNotFound": NoFile} - - if expect_error: - self.assertIsNotNone(error) - self.assertIsInstance(error, errors[test['assert']['error']], - test['description']) - else: - self.assertIsNone(error) - - if 'result' in test['assert']: - if test['assert']['result'] == 'void': - test['assert']['result'] = None - self.assertEqual(result, test['assert'].get('result')) - - if 'data' in test['assert']: - # Create alphabetized list - self.assertEqual( - set(self.sorted_list(self.db.fs.chunks, True)), - set(self.sorted_list(self.db.expected.chunks, True))) - - self.assertEqual( - set(self.sorted_list(self.db.fs.files, False)), - set(self.sorted_list(self.db.expected.files, False))) - - return run_scenario - -def _object_hook(dct): - if 'length' in dct: - dct['length'] = Int64(dct['length']) - return object_hook(dct) - -def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = loads( - scenario_stream.read(), object_hook=_object_hook) - - # Because object_hook is already defined by bson.json_util, - # and everything is named 'data' - def str2hex(jsn): - for key, val in jsn.items(): - if key in ("data", "source", "result"): - if "$hex" in val: - jsn[key] = Binary(bytes_from_hex(val['$hex'])) - if isinstance(jsn[key], dict): - str2hex(jsn[key]) - if isinstance(jsn[key], list): - for k in jsn[key]: - str2hex(k) - - str2hex(scenario_def) - - # Construct test from scenario. - new_test = create_test(scenario_def) - test_name = 'test_%s' % ( - os.path.splitext(filename)[0]) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") -create_tests() +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 61a0afc15c..5c75ab01df 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -13,56 +13,50 @@ # limitations under the License. 
"""Test the monitoring of the server heartbeats.""" +from __future__ import annotations import sys -import threading sys.path[0:0] = [""] +from test import IntegrationTest, client_knobs, unittest +from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until + from pymongo.errors import ConnectionFailure -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor -from test import unittest, client_knobs -from test.utils import (HeartbeatEventListener, MockPool, single_client, - wait_until) - -class TestHeartbeatMonitoring(unittest.TestCase): +class TestHeartbeatMonitoring(IntegrationTest): def create_mock_monitor(self, responses, uri, expected_results): listener = HeartbeatEventListener() - with client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1, - events_queue_frequency=0.1): + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + class MockMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if isinstance(responses[1], Exception): raise responses[1] - return IsMaster(responses[1]), 99 + return Hello(responses[1]), 99 m = single_client( - h=uri, - event_listeners=(listener,), - _monitor_class=MockMonitor, - _pool_class=MockPool) + h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool + ) expected_len = len(expected_results) # Wait for *at least* expected_len number of results. The # monitor thread may run multiple times during the execution # of this test. - wait_until( - lambda: len(listener.results) >= expected_len, - "publish all events") + wait_until(lambda: len(listener.events) >= expected_len, "publish all events") try: # zip gives us len(expected_results) pairs. - for expected, actual in zip(expected_results, listener.results): - self.assertEqual(expected, - actual.__class__.__name__) - self.assertEqual(actual.connection_id, - responses[0]) - if expected != 'ServerHeartbeatStartedEvent': - if isinstance(actual.reply, IsMaster): + for expected, actual in zip(expected_results, listener.events): + self.assertEqual(expected, actual.__class__.__name__) + self.assertEqual(actual.connection_id, responses[0]) + if expected != "ServerHeartbeatStartedEvent": + if isinstance(actual.reply, Hello): self.assertEqual(actual.duration, 99) self.assertEqual(actual.reply._doc, responses[1]) else: @@ -72,28 +66,25 @@ def _check_with_socket(self, *args, **kwargs): m.close() def test_standalone(self): - responses = (('a', 27017), - { - "ismaster": True, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1 - }) + responses = ( + ("a", 27017), + {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1}, + ) uri = "mongodb://a:27017" - expected_results = ['ServerHeartbeatStartedEvent', - 'ServerHeartbeatSucceededEvent'] + expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"] self.create_mock_monitor(responses, uri, expected_results) def test_standalone_error(self): - responses = (('a', 27017), - ConnectionFailure("SPECIAL MESSAGE")) + responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE")) uri = "mongodb://a:27017" # _check_with_socket failing results in a second attempt. 
-        expected_results = ['ServerHeartbeatStartedEvent',
-                            'ServerHeartbeatFailedEvent',
-                            'ServerHeartbeatStartedEvent',
-                            'ServerHeartbeatFailedEvent']
+        expected_results = [
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+        ]
         self.create_mock_monitor(responses, uri, expected_results)
diff --git a/test/test_index_management.py b/test/test_index_management.py
new file mode 100644
index 0000000000..9db9a22aea
--- /dev/null
+++ b/test/test_index_management.py
@@ -0,0 +1,240 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the index management spec tests."""
+from __future__ import annotations
+
+import os
+import sys
+import time
+import uuid
+from typing import Any, Mapping
+
+sys.path[0:0] = [""]
+
+from test import IntegrationTest, unittest
+from test.unified_format import generate_test_classes
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo.operations import SearchIndexModel
+
+_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "index_management")
+
+_NAME = "test-search-index"
+
+
+class TestCreateSearchIndex(IntegrationTest):
+    def test_inputs(self):
+        if not os.environ.get("TEST_INDEX_MANAGEMENT"):
+            raise unittest.SkipTest("Skipping index management tests")
+        client = MongoClient()
+        self.addCleanup(client.close)
+        coll = client.test.test
+        coll.drop()
+        definition = dict(mappings=dict(dynamic=True))
+        model_kwarg_list: list[Mapping[str, Any]] = [
+            dict(definition=definition, name=None),
+            dict(definition=definition, name="test"),
+        ]
+        for model_kwargs in model_kwarg_list:
+            model = SearchIndexModel(**model_kwargs)
+            with self.assertRaises(OperationFailure):
+                coll.create_search_index(model)
+            with self.assertRaises(OperationFailure):
+                coll.create_search_index(model_kwargs)
+
+
+class TestSearchIndexProse(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        super().setUpClass()
+        if not os.environ.get("TEST_INDEX_MANAGEMENT"):
+            raise unittest.SkipTest("Skipping index management tests")
+        url = os.environ.get("MONGODB_URI")
+        username = os.environ["DB_USER"]
+        password = os.environ["DB_PASSWORD"]
+        cls.client = MongoClient(url, username=username, password=password)
+        cls.client.drop_database(_NAME)
+        cls.db = cls.client.test_search_index_prose
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.client.drop_database(_NAME)
+        cls.client.close()
+
+    def wait_for_ready(self, coll, name=_NAME, predicate=None):
+        """Wait for a search index to be ready."""
+        indices: list[Mapping[str, Any]] = []
+        if predicate is None:
+            predicate = lambda index: index.get("queryable") is True
+
+        while True:
+            indices = list(coll.list_search_indexes(name))
+            if len(indices) and predicate(indices[0]):
+                return indices[0]
+            time.sleep(5)
+
+    def test_case_1(self):
+        """Driver can successfully create and list search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition:
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        coll0.insert_one({})
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = self.wait_for_ready(coll0)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    def test_case_2(self):
+        """Driver can successfully create multiple indexes in batch."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper.
+        name1 = "test-search-index-1"
+        name2 = "test-search-index-2"
+        definition = {"mappings": {"dynamic": False}}
+        index_definitions: list[dict[str, Any]] = [
+            {"name": name1, "definition": definition},
+            {"name": name2, "definition": definition},
+        ]
+        coll0.create_search_indexes(
+            [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions]
+        )
+
+        # Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``.
+        indices = list(coll0.list_search_indexes())
+        names = [i["name"] for i in indices]
+        self.assertIn(name1, names)
+        self.assertIn(name2, names)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied.
+        # An index with the ``name`` of ``test-search-index-1`` is present and the index has a field ``queryable`` with the value of ``true``. Store result in ``index1``.
+        # An index with the ``name`` of ``test-search-index-2`` is present and the index has a field ``queryable`` with the value of ``true``. Store result in ``index2``.
+        index1 = self.wait_for_ready(coll0, name1)
+        index2 = self.wait_for_ready(coll0, name2)
+
+        # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }``.
+        for index in [index1, index2]:
+            self.assertIn("latestDefinition", index)
+            self.assertEqual(index["latestDefinition"], definition)
+
+    def test_case_3(self):
+        """Driver can successfully drop search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+ self.assertEqual(resp, "test-search-index") + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. + self.wait_for_ready(coll0) + + # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name. + coll0.drop_search_index(_NAME) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array. + t0 = time.time() + while True: + indices = list(coll0.list_search_indexes()) + if indices: + break + if (time.time() - t0) / 60 > 5: + raise TimeoutError("Timed out waiting for index deletion") + time.sleep(5) + + def test_case_4(self): + """Driver can update a search index.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Create a new search index on ``coll0``. + model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}} + resp = coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index"``. + self.assertEqual(resp, _NAME) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. + self.wait_for_ready(coll0) + + # Run a ``updateSearchIndex`` on ``coll0``. + # Assert that the command does not error and the server responds with a success. + model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}} + coll0.update_search_index(_NAME, model2["definition"]) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``. + # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``. + predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY" + self.wait_for_ready(coll0, predicate=predicate) + + # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``. + index = list(coll0.list_search_indexes(_NAME))[0] + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model2["definition"]) + + def test_case_5(self): + """``dropSearchIndex`` suppresses namespace not found errors.""" + # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server. + coll0 = self.db[f"col{uuid.uuid4()}"] + + # Run a ``dropSearchIndex`` command and assert that no error is thrown. 
+ coll0.drop_search_index("foo") + + +if os.environ.get("TEST_INDEX_MANAGEMENT"): + globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) + ) +else: + + class TestIndexManagementUnifiedTests(unittest.TestCase): + @classmethod + def setUpClass(cls) -> None: + raise unittest.SkipTest("Skipping index management pending PYTHON-3951") + + def test_placeholder(self): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_json_util.py b/test/test_json_util.py index 75b177e442..a35d736ebd 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -13,26 +13,40 @@ # limitations under the License. """Test some utilities for working with JSON and PyMongo.""" +from __future__ import annotations import datetime import json import re import sys import uuid +from typing import Any, List, MutableMapping -sys.path[0:0] = [""] +from bson.codec_options import CodecOptions, DatetimeConversion -from pymongo.errors import ConfigurationError +sys.path[0:0] = [""] -from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON -from bson.json_util import (DatetimeRepresentation, - STRICT_JSON_OPTIONS) -from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE, - USER_DEFINED_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY, - STANDARD) +from test import IntegrationTest, unittest + +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + MD5_SUBTYPE, + STANDARD, + USER_DEFINED_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code +from bson.datetime_ms import _max_datetime_ms from bson.dbref import DBRef from bson.int64 import Int64 +from bson.json_util import ( + LEGACY_JSON_OPTIONS, + DatetimeRepresentation, + JSONMode, + JSONOptions, +) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId @@ -40,9 +54,12 @@ from bson.timestamp import Timestamp from bson.tz_util import FixedOffset, utc -from test import unittest, IntegrationTest - -PY3 = sys.version_info[0] == 3 +STRICT_JSON_OPTIONS = JSONOptions( + strict_number_long=True, + datetime_representation=DatetimeRepresentation.ISO8601, + strict_uuid=True, + json_mode=JSONMode.LEGACY, +) class TestJsonUtil(unittest.TestCase): @@ -55,6 +72,35 @@ def round_trip(self, doc, **kwargs): def test_basic(self): self.round_trip({"hello": "world"}) + def test_loads_bytes(self): + string = b'{"hello": "world"}' + self.assertEqual(json_util.loads(bytes(string)), {"hello": "world"}) + self.assertEqual(json_util.loads(bytearray(string)), {"hello": "world"}) + + def test_json_options_with_options(self): + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) + opts2 = opts.with_options( + datetime_representation=DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts2.datetime_representation, DatetimeRepresentation.ISO8601) + + opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(opts.strict_number_long, True) + opts2 = opts.with_options(strict_number_long=False) + self.assertEqual(opts2.strict_number_long, False) + + opts = json_util.CANONICAL_JSON_OPTIONS + self.assertNotEqual(opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) + opts2 = opts.with_options(uuid_representation=UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) + 
self.assertEqual(opts2.document_class, dict) + opts3 = opts2.with_options(document_class=SON) + self.assertEqual(opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts3.document_class, SON) + def test_objectid(self): self.round_trip({"id": ObjectId()}) @@ -66,137 +112,210 @@ def test_dbref(self): # Check order. self.assertEqual( '{"$ref": "collection", "$id": 1, "$db": "db"}', - json_util.dumps(DBRef('collection', 1, 'db'))) + json_util.dumps(DBRef("collection", 1, "db")), + ) def test_datetime(self): + tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options(tz_aware=True) # only millis, not micros - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000, utc)}) - - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # No explicit offset - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # Localtime behind UTC - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # Localtime ahead of UTC - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) + self.round_trip( + {"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000, utc)}, + json_options=tz_aware_opts, + ) + self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 
191000)}) + + for jsn in [ + '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', + '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}', + ]: + self.assertEqual(EPOCH_AWARE, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) + self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"]) dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc) jsn = '{"dt": {"$date": -62135593139000}}' - self.assertEqual(dtm, json_util.loads(jsn)["dt"]) + self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}' - self.assertEqual(dtm, json_util.loads(jsn)["dt"]) + self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) # Test dumps format pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)} + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch) + ) + self.assertEqual( + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch) + ) self.assertEqual( '{"dt": {"$date": -62135593138990}}', - json_util.dumps(pre_epoch)) + json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": 63075661010}}', - json_util.dumps(post_epoch)) + json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS), + ) - number_long_options = json_util.JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG) + number_long_options = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "63075661010"}}}', - json_util.dumps(post_epoch, json_options=number_long_options)) + json_util.dumps(post_epoch, json_options=number_long_options), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=number_long_options)) + json_util.dumps(pre_epoch, json_options=number_long_options), + ) # ISO8601 mode assumes 
naive datetimes are UTC pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)} - post_epoch_naive = { - "dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} + post_epoch_naive = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch_naive, - json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) # Test tz_aware and tzinfo options self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( - '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"]) + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', json_options=tz_aware_opts + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=json_util.JSONOptions(tz_aware=True, - tzinfo=utc))["dt"]) + json_options=JSONOptions(tz_aware=True, tzinfo=utc), + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=json_util.JSONOptions(tz_aware=False))["dt"]) - self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions( - tz_aware=False)) + json_options=JSONOptions(tz_aware=False), + )["dt"], + ) + self.round_trip(pre_epoch_naive, json_options=JSONOptions(tz_aware=False)) # Test a non-utc timezone - pacific = FixedOffset(-8 * 60, 'US/Pacific') - aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, - pacific)} + pacific = FixedOffset(-8 * 60, "US/Pacific") + aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, pacific)} self.assertEqual( '{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}', - json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS)) - self.round_trip(aware_datetime, json_options=json_util.JSONOptions( - tz_aware=True, tzinfo=pacific)) - self.round_trip(aware_datetime, json_options=json_util.JSONOptions( + json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions(json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY, + tz_aware=True, + tzinfo=pacific, + ), + ) + + def test_datetime_ms(self): + # Test ISO8601 in-range + dat_min = {"x": DatetimeMS(0)} + dat_max = {"x": DatetimeMS(_max_datetime_ms())} + opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) + + self.assertEqual( + dat_min["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_min))["x"], + ) + self.assertEqual( + dat_max["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_max))["x"], + ) + + # Test ISO8601 out-of-range + dat_min = {"x": DatetimeMS(-1)} + dat_max = {"x": DatetimeMS(_max_datetime_ms() + 1)} + + self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max), + ) + # Test legacy. 
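+        # (DatetimeRepresentation.LEGACY renders the millisecond value as a bare
+        # string inside $date, as asserted below.)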
+ opts = JSONOptions( + datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY + ) + self.assertEqual('{"x": {"$date": "-1"}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual( + '{"x": {"$date": "' + str(int(dat_max["x"])) + '"}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test regular. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min, json_options=opts) + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test decode from datetime.datetime to DatetimeMS + dat_min = {"x": datetime.datetime.min} + dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} + opts = JSONOptions( datetime_representation=DatetimeRepresentation.ISO8601, - tz_aware=True, tzinfo=pacific)) + datetime_conversion=DatetimeConversion.DATETIME_MS, + ) + + self.assertEqual( + DatetimeMS(dat_min["x"]), + json_util.loads(json_util.dumps(dat_min), json_options=opts)["x"], + ) + self.assertEqual( + DatetimeMS(dat_max["x"]), + json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], + ) def test_regex_object_hook(self): # Extended JSON format regular expression. - pat = 'a*b' + pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) self.assertTrue(isinstance(loaded, Regex)) @@ -204,9 +323,7 @@ def test_regex_object_hook(self): self.assertEqual(re.U, loaded.flags) def test_regex(self): - for regex_instance in ( - re.compile("a*b", re.IGNORECASE), - Regex("a*b", re.IGNORECASE)): + for regex_instance in (re.compile("a*b", re.IGNORECASE), Regex("a*b", re.IGNORECASE)): res = self.round_tripped({"r": regex_instance})["r"] self.assertEqual("a*b", res.pattern) @@ -214,28 +331,43 @@ def test_regex(self): self.assertEqual("a*b", res.pattern) self.assertEqual(re.IGNORECASE, res.flags) - unicode_options = re.I|re.M|re.S|re.U|re.X + unicode_options = re.I | re.M | re.S | re.U | re.X regex = re.compile("a*b", unicode_options) res = self.round_tripped({"r": regex})["r"] self.assertEqual(unicode_options, res.flags) # Some tools may not add $options if no flags are set. - res = json_util.loads('{"r": {"$regex": "a*b"}}')['r'] + res = json_util.loads('{"r": {"$regex": "a*b"}}')["r"] self.assertEqual(0, res.flags) self.assertEqual( - Regex('.*', 'ilm'), - json_util.loads( - '{"r": {"$regex": ".*", "$options": "ilm"}}')['r']) + Regex(".*", "ilm"), json_util.loads('{"r": {"$regex": ".*", "$options": "ilm"}}')["r"] + ) # Check order. 
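+        # In the default JSON mode, patterns dump as the extended JSON
+        # $regularExpression form; LEGACY_JSON_OPTIONS keeps the older $regex form.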
self.assertEqual( - '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(Regex('.*', re.M | re.X))) + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', + json_util.dumps(Regex(".*", re.M | re.X)), + ) + + self.assertEqual( + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', + json_util.dumps(re.compile(b".*", re.M | re.X)), + ) self.assertEqual( '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(re.compile(b'.*', re.M | re.X))) + json_util.dumps(Regex(".*", re.M | re.X), json_options=LEGACY_JSON_OPTIONS), + ) + + def test_regex_validation(self): + non_str_types = [10, {}, []] + docs = [{"$regex": i} for i in non_str_types] + for doc in docs: + self.assertEqual(doc, json_util.loads(json.dumps(doc))) + + doc = {"$regex": ""} + self.assertIsInstance(json_util.loads(json.dumps(doc)), Regex) def test_minkey(self): self.round_trip({"m": MinKey()}) @@ -250,45 +382,97 @@ def test_timestamp(self): self.assertEqual(dct, rtdct) self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res) + def test_uuid_default(self): + # Cannot directly encode native UUIDs with the default + # uuid_representation. + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + json_util.dumps(doc) + legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}' + expected = {"uuid": Binary(b"\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y", 4)} + self.assertEqual(json_util.loads(legacy_jsn), expected) + def test_uuid(self): - doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} - self.round_trip(doc) + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ) + self.round_trip(doc, json_options=uuid_legacy_opts) self.assertEqual( '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', - json_util.dumps(doc)) + json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( - doc, json_options=json_util.STRICT_JSON_OPTIONS)) + doc, + json_options=STRICT_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ), + ), + ) self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( - doc, json_options=json_util.JSONOptions( - strict_uuid=True, uuid_representation=STANDARD))) - self.assertEqual( - doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}')) - for uuid_representation in ALL_UUID_REPRESENTATIONS: - options = json_util.JSONOptions( - strict_uuid=True, uuid_representation=uuid_representation) + doc, + json_options=JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=STANDARD + ), + ), + ) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_options=uuid_legacy_opts, + ), + ) + for uuid_representation in set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}: + options = JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=uuid_representation + ) self.round_trip(doc, json_options=options) # Ignore UUID representation when decoding BSON binary subtype 4. 
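+            # (Subtype 4 is the standard UUID subtype: it decodes to uuid.UUID under
+            # every representation except UNSPECIFIED, which is excluded above.)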
- self.assertEqual(doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', - json_options=options)) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + json_options=options, + ), + ) + + def test_uuid_uuid_rep_unspecified(self): + _uuid = uuid.uuid4() + options = JSONOptions( + strict_uuid=True, + json_mode=JSONMode.LEGACY, + uuid_representation=UuidRepresentation.UNSPECIFIED, + ) + + # Cannot directly encode native UUIDs with UNSPECIFIED. + doc = {"uuid": _uuid} + with self.assertRaises(ValueError): + json_util.dumps(doc, json_options=options) + + # All UUID subtypes are decoded as Binary with UNSPECIFIED. + # subtype 3 + doc = {"uuid": Binary(_uuid.bytes, subtype=3)} + ext_json_str = json_util.dumps(doc) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) + # subtype 4 + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps(doc) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) + # $uuid-encoded fields + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps({"uuid": _uuid}, json_options=LEGACY_JSON_OPTIONS) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): - if PY3: - bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} - else: - bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")} + bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} md5_type_dict = { - "md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac', - MD5_SUBTYPE)} + "md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac", MD5_SUBTYPE) + } custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)} self.round_trip(bin_type_dict) @@ -296,43 +480,47 @@ def test_binary(self): self.round_trip(custom_type_dict) # Binary with subtype 0 is decoded into bytes in Python 3. - bin = json_util.loads( - '{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin'] - if PY3: - self.assertEqual(type(bin), bytes) - else: - self.assertEqual(type(bin), Binary) + bin = json_util.loads('{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')["bin"] + self.assertEqual(type(bin), bytes) # PYTHON-443 ensure old type formats are supported - json_bin_dump = json_util.dumps(bin_type_dict) - self.assertTrue('"$type": "00"' in json_bin_dump) - self.assertEqual(bin_type_dict, - json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')) - json_bin_dump = json_util.dumps(md5_type_dict) + json_bin_dump = json_util.dumps(bin_type_dict, json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "00"', json_bin_dump) + self.assertEqual( + bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}') + ) + json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) # Check order. 
self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' - + ' "$type": "05"}}', - json_bin_dump) + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==", "$type": "05"}}', json_bin_dump + ) - self.assertEqual(md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' - ' "IG43GK8JL9HRL4DK53HMrA=="}}')) + self.assertEqual( + md5_type_dict, + json_util.loads('{"md5": {"$type": 5, "$binary": "IG43GK8JL9HRL4DK53HMrA=="}}'), + ) - json_bin_dump = json_util.dumps(custom_type_dict) - self.assertTrue('"$type": "80"' in json_bin_dump) - self.assertEqual(custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' - ' "aGVsbG8="}}')) + json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "80"', json_bin_dump) + self.assertEqual( + custom_type_dict, + json_util.loads('{"custom": {"$type": 128, "$binary": "aGVsbG8="}}'), + ) # Handle mongoexport where subtype >= 128 - self.assertEqual(128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 128, + json_util.loads('{"custom": {"$type": "ffffff80", "$binary": "aGVsbG8="}}')[ + "custom" + ].subtype, + ) - self.assertEqual(255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 255, + json_util.loads('{"custom": {"$type": "ffffffff", "$binary": "aGVsbG8="}}')[ + "custom" + ].subtype, + ) def test_code(self): self.round_trip({"code": Code("function x() { return 1; }")}) @@ -344,33 +532,30 @@ def test_code(self): # Check order. self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res) - no_scope = Code('function() {}') - self.assertEqual( - '{"$code": "function() {}"}', json_util.dumps(no_scope)) + no_scope = Code("function() {}") + self.assertEqual('{"$code": "function() {}"}', json_util.dumps(no_scope)) def test_undefined(self): jsn = '{"name": {"$undefined": true}}' - self.assertIsNone(json_util.loads(jsn)['name']) + self.assertIsNone(json_util.loads(jsn)["name"]) def test_numberlong(self): jsn = '{"weight": {"$numberLong": "65535"}}' - self.assertEqual(json_util.loads(jsn)['weight'], - Int64(65535)) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}), - '{"weight": 65535}') - json_options = json_util.JSONOptions(strict_number_long=True) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}, - json_options=json_options), - jsn) + self.assertEqual(json_util.loads(jsn)["weight"], Int64(65535)) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') + json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) def test_loads_document_class(self): # document_class dict should always work - self.assertEqual({"foo": "bar"}, json_util.loads( - '{"foo": "bar"}', - json_options=json_util.JSONOptions(document_class=dict))) - self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads( - '{"foo": "bar", "b": 1}', - json_options=json_util.JSONOptions(document_class=SON))) + self.assertEqual( + {"foo": "bar"}, + json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)), + ) + self.assertEqual( + SON([("foo", "bar"), ("b", 1)]), + json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)), + ) class TestJsonUtilRoundtrip(IntegrationTest): @@ -378,13 +563,12 @@ def test_cursor(self): db = self.db db.drop_collection("test") 
- docs = [ - {'foo': [1, 2]}, - {'bar': {'hello': 'world'}}, - {'code': Code("function x() { return 1; }")}, - {'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, - {'dbref': {'_ref': DBRef('simple', - ObjectId('509b8db456c02c5ab7e63c34'))}} + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, ] db.test.insert_many(docs) @@ -392,5 +576,6 @@ def test_cursor(self): for doc in docs: self.assertTrue(doc in reloaded_docs) + if __name__ == "__main__": unittest.main() diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py deleted file mode 100644 index 01c4d4ae01..0000000000 --- a/test/test_legacy_api.py +++ /dev/null @@ -1,2508 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test various legacy / deprecated API features.""" - -import itertools -import sys -import threading -import time -import uuid -import warnings - -sys.path[0:0] = [""] - -from bson.binary import PYTHON_LEGACY, STANDARD -from bson.code import Code -from bson.codec_options import CodecOptions -from bson.objectid import ObjectId -from bson.py3compat import string_type -from bson.son import SON -from pymongo import ASCENDING, DESCENDING -from pymongo.database import Database -from pymongo.common import partition_node -from pymongo.errors import (BulkWriteError, - ConfigurationError, - CursorNotFound, - DocumentTooLarge, - DuplicateKeyError, - InvalidDocument, - InvalidOperation, - OperationFailure, - WriteConcernError, - WTimeoutError) -from pymongo.message import _CursorAddress -from pymongo.son_manipulator import (AutoReference, - NamespaceInjector, - ObjectIdShuffler, - SONManipulator) -from pymongo.write_concern import WriteConcern -from test import client_context, qcheck, unittest, SkipTest -from test.test_client import IntegrationTest -from test.test_bulk import BulkTestBase, BulkAuthorizationTestBase -from test.utils import (DeprecationFilter, - joinall, - oid_generated_on_process, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - wait_until) - - -class TestDeprecations(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestDeprecations, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter("error") - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_save_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.save({})) - - def test_insert_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.insert({})) - - def test_update_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.update({}, {})) - - def test_remove_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.remove({})) - - def test_find_and_modify_deprecation(self): - 
self.assertRaises( - DeprecationWarning, - lambda: self.db.test.find_and_modify({'i': 5}, {})) - - def test_add_son_manipulator_deprecation(self): - db = self.client.pymongo_test - self.assertRaises(DeprecationWarning, - lambda: db.add_son_manipulator(AutoReference(db))) - - def test_ensure_index_deprecation(self): - try: - self.assertRaises( - DeprecationWarning, - lambda: self.db.test.ensure_index('i')) - finally: - self.db.test.drop() - - -class TestLegacy(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestLegacy, cls).setUpClass() - cls.w = client_context.w - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_insert_find_one(self): - # Tests legacy insert. - db = self.db - db.test.drop() - self.assertEqual(0, len(list(db.test.find()))) - doc = {"hello": u"world"} - _id = db.test.insert(doc) - self.assertEqual(1, len(list(db.test.find()))) - self.assertEqual(doc, db.test.find_one()) - self.assertEqual(doc["_id"], _id) - self.assertTrue(isinstance(_id, ObjectId)) - - doc_class = dict - # Work around http://bugs.jython.org/issue1728 - if (sys.platform.startswith('java') and - sys.version_info[:3] >= (2, 5, 2)): - doc_class = SON - - db = self.client.get_database( - db.name, codec_options=CodecOptions(document_class=doc_class)) - - def remove_insert_find_one(doc): - db.test.remove({}) - db.test.insert(doc) - # SON equality is order sensitive. - return db.test.find_one() == doc.to_dict() - - qcheck.check_unittest(self, remove_insert_find_one, - qcheck.gen_mongo_dict(3)) - - def test_generator_insert(self): - # Only legacy insert currently supports insert from a generator. - db = self.db - db.test.remove({}) - self.assertEqual(db.test.find().count(), 0) - db.test.insert(({'a': i} for i in range(5)), manipulate=False) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - db.test.insert(({'a': i} for i in range(5)), manipulate=True) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - def test_insert_multiple(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test") - doc1 = {"hello": u"world"} - doc2 = {"hello": u"mike"} - self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert([doc1, doc2]) - self.assertEqual(db.test.find().count(), 2) - self.assertEqual(doc1, db.test.find_one({"hello": u"world"})) - self.assertEqual(doc2, db.test.find_one({"hello": u"mike"})) - - self.assertEqual(2, len(ids)) - self.assertEqual(doc1["_id"], ids[0]) - self.assertEqual(doc2["_id"], ids[1]) - - ids = db.test.insert([{"hello": 1}]) - self.assertTrue(isinstance(ids, list)) - self.assertEqual(1, len(ids)) - - self.assertRaises(InvalidOperation, db.test.insert, []) - - # Generator that raises StopIteration on first call to next(). - self.assertRaises(InvalidOperation, db.test.insert, (i for i in [])) - - def test_insert_multiple_with_duplicate(self): - # Tests legacy insert. 
- db = self.db - db.drop_collection("test_insert_multiple_with_duplicate") - collection = db.test_insert_multiple_with_duplicate - collection.create_index([('i', ASCENDING)], unique=True) - - # No error - collection.insert([{'i': i} for i in range(5, 10)], w=0) - wait_until(lambda: 5 == collection.count(), 'insert 5 documents') - - db.drop_collection("test_insert_multiple_with_duplicate") - collection.create_index([('i', ASCENDING)], unique=True) - - # No error - collection.insert([{'i': 1}] * 2, w=0) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2), - ) - - db.drop_collection("test_insert_multiple_with_duplicate") - db = self.client.get_database( - db.name, write_concern=WriteConcern(w=0)) - - collection = db.test_insert_multiple_with_duplicate - collection.create_index([('i', ASCENDING)], unique=True) - - # No error. - collection.insert([{'i': 1}] * 2) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - # Implied acknowledged. - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2, fsync=True), - ) - - # Explicit acknowledged. - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2, w=1)) - - db.drop_collection("test_insert_multiple_with_duplicate") - - @client_context.require_replica_set - def test_insert_prefers_write_errors(self): - # Tests legacy insert. - collection = self.db.test_insert_prefers_write_errors - self.db.drop_collection(collection.name) - collection.insert_one({'_id': 1}) - large = 's' * 1024 * 1024 * 15 - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}]) - self.assertEqual(1, collection.count()) - - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True) - self.assertEqual(2, collection.count()) - collection.delete_one({'_id': 2}) - - # A writeError followed by a writeConcernError should prefer to raise - # the writeError. - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True, - w=len(client_context.nodes) + 10, wtimeout=1) - self.assertEqual(2, collection.count()) - collection.delete_many({}) - - with self.assertRaises(WriteConcernError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True, - w=len(client_context.nodes) + 10, wtimeout=1) - self.assertEqual(2, collection.count()) - - def test_insert_iterables(self): - # Tests legacy insert. - db = self.db - - self.assertRaises(TypeError, db.test.insert, 4) - self.assertRaises(TypeError, db.test.insert, None) - self.assertRaises(TypeError, db.test.insert, True) - - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - db.test.insert(({"hello": u"world"}, {"hello": u"world"})) - self.assertEqual(db.test.find().count(), 2) - - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - db.test.insert(map(lambda x: {"hello": "world"}, - itertools.repeat(None, 10))) - self.assertEqual(db.test.find().count(), 10) - - def test_insert_manipulate_false(self): - # Test two aspects of legacy insert with manipulate=False: - # 1. The return value is None or [None] as appropriate. - # 2. _id is not set on the passed-in document object. 
- collection = self.db.test_insert_manipulate_false - collection.drop() - oid = ObjectId() - doc = {'a': oid} - - try: - # The return value is None. - self.assertTrue(collection.insert(doc, manipulate=False) is None) - # insert() shouldn't set _id on the passed-in document object. - self.assertEqual({'a': oid}, doc) - - # Bulk insert. The return value is a list of None. - self.assertEqual([None], collection.insert([{}], manipulate=False)) - - docs = [{}, {}] - ids = collection.insert(docs, manipulate=False) - self.assertEqual([None, None], ids) - self.assertEqual([{}, {}], docs) - finally: - collection.drop() - - def test_continue_on_error(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test_continue_on_error") - collection = db.test_continue_on_error - oid = collection.insert({"one": 1}) - self.assertEqual(1, collection.count()) - - docs = [] - docs.append({"_id": oid, "two": 2}) # Duplicate _id. - docs.append({"three": 3}) - docs.append({"four": 4}) - docs.append({"five": 5}) - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False) - - self.assertEqual(1, collection.count()) - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False, continue_on_error=True) - - self.assertEqual(4, collection.count()) - - collection.remove({}, w=client_context.w) - - oid = collection.insert({"_id": oid, "one": 1}, w=0) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - docs[0].pop("_id") - docs[2]["_id"] = oid - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False) - - self.assertEqual(3, collection.count()) - collection.insert(docs, manipulate=False, continue_on_error=True, w=0) - wait_until(lambda: 6 == collection.count(), 'insert 3 documents') - - def test_acknowledged_insert(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test_acknowledged_insert") - collection = db.test_acknowledged_insert - - a = {"hello": "world"} - collection.insert(a) - collection.insert(a, w=0) - self.assertRaises(OperationFailure, - collection.insert, a) - - def test_insert_adds_id(self): - # Tests legacy insert. - doc = {"hello": "world"} - self.db.test.insert(doc) - self.assertTrue("_id" in doc) - - docs = [{"hello": "world"}, {"hello": "world"}] - self.db.test.insert(docs) - for doc in docs: - self.assertTrue("_id" in doc) - - def test_insert_large_batch(self): - # Tests legacy insert. - db = self.client.test_insert_large_batch - self.addCleanup(self.client.drop_database, 'test_insert_large_batch') - max_bson_size = self.client.max_bson_size - # Write commands are limited to 16MB + 16k per batch - big_string = 'x' * int(max_bson_size / 2) - - # Batch insert that requires 2 batches. - successful_insert = [{'x': big_string}, {'x': big_string}, - {'x': big_string}, {'x': big_string}] - db.collection_0.insert(successful_insert, w=1) - self.assertEqual(4, db.collection_0.count()) - - db.collection_0.drop() - - # Test that inserts fail after first error. - insert_second_fails = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id2', 'x': big_string}] - - with self.assertRaises(DuplicateKeyError): - db.collection_1.insert(insert_second_fails) - - self.assertEqual(1, db.collection_1.count()) - - db.collection_1.drop() - - # 2 batches, 2nd insert fails, don't continue on error. 
- self.assertTrue(db.collection_2.insert(insert_second_fails, w=0)) - wait_until(lambda: 1 == db.collection_2.count(), - 'insert 1 document', timeout=60) - - db.collection_2.drop() - - # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are - # dupes. Acknowledged, continue on error. - insert_two_failures = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id1', 'x': big_string}] - - with self.assertRaises(OperationFailure) as context: - db.collection_3.insert(insert_two_failures, - continue_on_error=True, w=1) - - self.assertIn('id1', str(context.exception)) - - # Only the first and third documents should be inserted. - self.assertEqual(2, db.collection_3.count()) - - db.collection_3.drop() - - # 2 batches, 2 errors, unacknowledged, continue on error. - db.collection_4.insert(insert_two_failures, continue_on_error=True, w=0) - - # Only the first and third documents are inserted. - wait_until(lambda: 2 == db.collection_4.count(), - 'insert 2 documents', timeout=60) - - db.collection_4.drop() - - def test_bad_dbref(self): - # Requires the legacy API to test. - c = self.db.test - c.drop() - - # Incomplete DBRefs. - self.assertRaises( - InvalidDocument, - c.insert_one, {'ref': {'$ref': 'collection'}}) - - self.assertRaises( - InvalidDocument, - c.insert_one, {'ref': {'$id': ObjectId()}}) - - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} - - - def test_update(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - - id1 = db.test.save({"x": 5}) - db.test.update({}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 6) - - id2 = db.test.save({"x": 1}) - db.test.update({"x": 6}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) - - def test_update_manipulate(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - db.test.insert({'_id': 1}) - db.test.update({'_id': 1}, {'a': 1}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 1}, - db.test.find_one()) - - class AddField(SONManipulator): - def transform_incoming(self, son, dummy): - son['field'] = 'value' - return son - - db.add_son_manipulator(AddField()) - db.test.update({'_id': 1}, {'a': 2}, manipulate=False) - self.assertEqual( - {'_id': 1, 'a': 2}, - db.test.find_one()) - - db.test.update({'_id': 1}, {'a': 3}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 3, 'field': 'value'}, - db.test.find_one()) - - def test_update_nmodified(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - ismaster = self.client.admin.command('ismaster') - used_write_commands = (ismaster.get("maxWireVersion", 0) > 1) - - db.test.insert({'_id': 1}) - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(1, result['nModified']) - else: - self.assertFalse('nModified' in result) - - # x is already 1. - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(0, result['nModified']) - else: - self.assertFalse('nModified' in result) - - def test_multi_update(self): - # Tests legacy update. 
- db = self.db - db.drop_collection("test") - - db.test.save({"x": 4, "y": 3}) - db.test.save({"x": 5, "y": 5}) - db.test.save({"x": 4, "y": 4}) - - db.test.update({"x": 4}, {"$set": {"y": 5}}, multi=True) - - self.assertEqual(3, db.test.count()) - for doc in db.test.find(): - self.assertEqual(5, doc["y"]) - - self.assertEqual(2, db.test.update({"x": 4}, {"$set": {"y": 6}}, - multi=True)["n"]) - - def test_upsert(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - - self.assertEqual(1, db.test.count()) - self.assertEqual(2, db.test.find_one()["count"]) - - def test_acknowledged_update(self): - # Tests legacy update. - db = self.db - db.drop_collection("test_acknowledged_update") - collection = db.test_acknowledged_update - collection.create_index("x", unique=True) - - collection.insert({"x": 5}) - _id = collection.insert({"x": 4}) - - self.assertEqual( - None, collection.update({"_id": _id}, {"$inc": {"x": 1}}, w=0)) - - self.assertRaises(DuplicateKeyError, collection.update, - {"_id": _id}, {"$inc": {"x": 1}}) - - self.assertEqual(1, collection.update({"_id": _id}, - {"$inc": {"x": 2}})["n"]) - - self.assertEqual(0, collection.update({"_id": "foo"}, - {"$inc": {"x": 2}})["n"]) - db.drop_collection("test_acknowledged_update") - - def test_update_backward_compat(self): - # MongoDB versions >= 2.6.0 don't return the updatedExisting field - # and return upsert _id in an array subdocument. This test should - # pass regardless of server version or type (mongod/s). - # Tests legacy update. - c = self.db.test - c.drop() - oid = ObjectId() - res = c.update({'_id': oid}, {'$set': {'a': 'a'}}, upsert=True) - self.assertFalse(res.get('updatedExisting')) - self.assertEqual(oid, res.get('upserted')) - - res = c.update({'_id': oid}, {'$set': {'b': 'b'}}) - self.assertTrue(res.get('updatedExisting')) - - def test_save(self): - # Tests legacy save. - self.db.drop_collection("test_save") - collection = self.db.test_save - - # Save a doc with autogenerated id - _id = collection.save({"hello": "world"}) - self.assertEqual(collection.find_one()["_id"], _id) - self.assertTrue(isinstance(_id, ObjectId)) - - # Save a doc with explicit id - collection.save({"_id": "explicit_id", "hello": "bar"}) - doc = collection.find_one({"_id": "explicit_id"}) - self.assertEqual(doc['_id'], 'explicit_id') - self.assertEqual(doc['hello'], 'bar') - - # Save docs with _id field already present (shouldn't create new docs) - self.assertEqual(2, collection.count()) - collection.save({'_id': _id, 'hello': 'world'}) - self.assertEqual(2, collection.count()) - collection.save({'_id': 'explicit_id', 'hello': 'baz'}) - self.assertEqual(2, collection.count()) - self.assertEqual( - 'baz', - collection.find_one({'_id': 'explicit_id'})['hello'] - ) - - # Acknowledged mode. - collection.create_index("hello", unique=True) - # No exception, even though we duplicate the first doc's "hello" value - collection.save({'_id': 'explicit_id', 'hello': 'world'}, w=0) - - self.assertRaises( - DuplicateKeyError, - collection.save, - {'_id': 'explicit_id', 'hello': 'world'}) - self.db.drop_collection("test") - - def test_save_with_invalid_key(self): - if client_context.version.at_least(3, 5, 8): - raise SkipTest("MongoDB >= 3.5.8 allows dotted fields in updates") - # Tests legacy save. 
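- # save() of a doc that already has an _id performs a replace, and servers before 3.5.8 reject dotted field names like 'a.b' in the replacement document.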
- self.db.drop_collection("test") - self.assertTrue(self.db.test.insert({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - self.assertRaises(OperationFailure, self.db.test.save, doc) - - def test_acknowledged_save(self): - # Tests legacy save. - db = self.db - db.drop_collection("test_acknowledged_save") - collection = db.test_acknowledged_save - collection.create_index("hello", unique=True) - - collection.save({"hello": "world"}) - collection.save({"hello": "world"}, w=0) - self.assertRaises(DuplicateKeyError, collection.save, - {"hello": "world"}) - db.drop_collection("test_acknowledged_save") - - def test_save_adds_id(self): - # Tests legacy save. - doc = {"hello": "jesse"} - self.db.test.save(doc) - self.assertTrue("_id" in doc) - - def test_save_returns_id(self): - doc = {"hello": "jesse"} - _id = self.db.test.save(doc) - self.assertTrue(isinstance(_id, ObjectId)) - self.assertEqual(_id, doc["_id"]) - doc["hi"] = "bernie" - _id = self.db.test.save(doc) - self.assertTrue(isinstance(_id, ObjectId)) - self.assertEqual(_id, doc["_id"]) - - def test_remove_one(self): - # Tests legacy remove. - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.db.test.insert({"z": 1}) - self.assertEqual(3, self.db.test.count()) - - self.db.test.remove(multi=False) - self.assertEqual(2, self.db.test.count()) - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - def test_remove_all(self): - # Tests legacy remove. - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.assertEqual(2, self.db.test.count()) - - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - def test_remove_non_objectid(self): - # Tests legacy remove. - db = self.db - db.drop_collection("test") - - db.test.insert_one({"_id": 5}) - - self.assertEqual(1, db.test.count()) - db.test.remove(5) - self.assertEqual(0, db.test.count()) - - def test_write_large_document(self): - # Tests legacy insert, save, and update. - max_size = self.db.client.max_bson_size - half_size = int(max_size / 2) - self.assertEqual(max_size, 16777216) - - self.assertRaises(OperationFailure, self.db.test.insert, - {"foo": "x" * max_size}) - self.assertRaises(OperationFailure, self.db.test.save, - {"foo": "x" * max_size}) - self.assertRaises(OperationFailure, self.db.test.insert, - [{"x": 1}, {"foo": "x" * max_size}]) - self.db.test.insert([{"foo": "x" * half_size}, - {"foo": "x" * half_size}]) - - self.db.test.insert({"bar": "x"}) - # Use w=0 here to test legacy doc size checking in all server versions - self.assertRaises(DocumentTooLarge, self.db.test.update, - {"bar": "x"}, {"bar": "x" * (max_size - 14)}, w=0) - # This will pass with OP_UPDATE or the update command. - self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 32)}) - - def test_last_error_options(self): - # Tests legacy write methods. - self.db.test.save({"x": 1}, w=1, wtimeout=1) - self.db.test.insert({"x": 1}, w=1, wtimeout=1) - self.db.test.remove({"x": 1}, w=1, wtimeout=1) - self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1) - - if client_context.replica_set_name: - # client_context.w is the number of hosts in the replica set - w = client_context.w + 1 - - # MongoDB 2.8+ raises error code 100, CannotSatisfyWriteConcern, - # if w > number of members. Older versions just time out after 1 ms - # as if they had enough secondaries but some are lagging. 
They - # return an error with 'wtimeout': True and no code. - def wtimeout_err(f, *args, **kwargs): - try: - f(*args, **kwargs) - except WTimeoutError as exc: - self.assertIsNotNone(exc.details) - except OperationFailure as exc: - self.assertIsNotNone(exc.details) - self.assertEqual(100, exc.code, - "Unexpected error: %r" % exc) - else: - self.fail("%s should have failed" % f) - - coll = self.db.test - wtimeout_err(coll.save, {"x": 1}, w=w, wtimeout=1) - wtimeout_err(coll.insert, {"x": 1}, w=w, wtimeout=1) - wtimeout_err(coll.update, {"x": 1}, {"y": 2}, w=w, wtimeout=1) - wtimeout_err(coll.remove, {"x": 1}, w=w, wtimeout=1) - - # can't use fsync and j options together - self.assertRaises(ConfigurationError, self.db.test.insert, - {"_id": 1}, j=True, fsync=True) - - def test_find_and_modify(self): - c = self.db.test - c.drop() - c.insert({'_id': 1, 'i': 1}) - - # Test that we raise DuplicateKeyError when appropriate. - c.ensure_index('i', unique=True) - self.assertRaises(DuplicateKeyError, - c.find_and_modify, query={'i': 1, 'j': 1}, - update={'$set': {'k': 1}}, upsert=True) - c.drop_indexes() - - # Test correct findAndModify - self.assertEqual({'_id': 1, 'i': 1}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, remove=True)) - - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual(None, c.find_and_modify({'_id': 1}, - {'$inc': {'i': 1}}, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - upsert=True, new=True)) - - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - fields=['i'])) - self.assertEqual({'_id': 1, 'i': 4}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1})) - - # Test with full_response=True. 
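- # full_response=True returns the raw findAndModify reply: the document is under 'value' and write details under 'lastErrorObject'.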
- result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 1, 'i': 5}, result["value"]) - self.assertEqual(True, - result["lastErrorObject"]["updatedExisting"]) - - result = c.find_and_modify({'_id': 2}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 2, 'i': 1}, result["value"]) - self.assertEqual(False, - result["lastErrorObject"]["updatedExisting"]) - - class ExtendedDict(dict): - pass - - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}) - self.assertFalse(isinstance(result, ExtendedDict)) - c = self.db.get_collection( - "test", codec_options=CodecOptions(document_class=ExtendedDict)) - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}) - self.assertTrue(isinstance(result, ExtendedDict)) - - def test_find_and_modify_with_sort(self): - c = self.db.test - c.drop() - for j in range(5): - c.insert({'j': j, 'i': 0}) - - sort = {'j': DESCENDING} - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = {'j': ASCENDING} - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = [('j', DESCENDING)] - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = [('j', ASCENDING)] - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = SON([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = SON([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - - try: - from collections import OrderedDict - sort = OrderedDict([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = OrderedDict([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - except ImportError: - pass - # Test that a standard dict with two keys is rejected. 
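- # (Plain dicts did not guarantee key order before Python 3.7, so a multi-key dict is an ambiguous sort specification.)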
- sort = {'j': DESCENDING, 'foo': DESCENDING} - self.assertRaises(TypeError, c.find_and_modify, - {}, {'$inc': {'i': 1}}, sort=sort) - - def test_find_and_modify_with_manipulator(self): - class AddCollectionNameManipulator(SONManipulator): - def will_copy(self): - return True - - def transform_incoming(self, son, dummy): - copy = SON(son) - if 'collection' in copy: - del copy['collection'] - return copy - - def transform_outgoing(self, son, collection): - copy = SON(son) - copy['collection'] = collection.name - return copy - - db = self.client.pymongo_test - db.add_son_manipulator(AddCollectionNameManipulator()) - - c = db.test - c.drop() - c.insert({'_id': 1, 'i': 1}) - - # Test correct findAndModify - # With manipulators - self.assertEqual({'_id': 1, 'i': 1, 'collection': 'test'}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - manipulate=True)) - self.assertEqual({'_id': 1, 'i': 3, 'collection': 'test'}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, manipulate=True)) - # Without manipulators - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 5}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - db = self.db - db.drop_collection("test") - - self.assertEqual([], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.insert_many([{"a": 2}, {"b": 5}, {"a": 1}]) - - self.assertEqual([{"count": 3}], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertEqual([{"count": 1}], - db.test.group([], {"a": {"$gt": 1}}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.insert_one({"a": 2, "b": 3}) - - self.assertEqual([{"a": 2, "count": 2}, - {"a": None, "count": 1}, - {"a": 1, "count": 1}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - # modifying finalize - self.assertEqual([{"a": 2, "count": 3}, - {"a": None, "count": 2}, - {"a": 1, "count": 2}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { obj.count++; }")) - - # returning finalize - self.assertEqual([2, 1, 1], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # keyf - self.assertEqual([2, 2], - db.test.group("function (obj) { if (obj.a == 2) " - "{ return {a: true} }; " - "return {b: true}; }", {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # no key - self.assertEqual([{"count": 4}], - db.test.group(None, {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertRaises(OperationFailure, db.test.group, - [], {}, {}, "5 ++ 5") - - @client_context.require_version_max(4, 1, 0, -1) - def test_group_with_scope(self): - db = self.db - db.drop_collection("test") - db.test.insert_many([{"a": 1}, {"b": 1}]) - - reduce_function = "function (obj, prev) { prev.count += inc_value; }" - - self.assertEqual(2, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 1}))[0]['count']) - self.assertEqual(4, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 2}))[0]['count']) - - self.assertEqual(1, - db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 0.5}))[0]['count']) - - self.assertEqual(2, db.test.group( -
[], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 1}))[0]['count']) - - self.assertEqual(4, db.test.group( - [], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 2}))[0]['count']) - - self.assertEqual(1, db.test.group( - [], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 0.5}))[0]['count']) - - @client_context.require_version_max(4, 1, 0, -1) - def test_group_uuid_representation(self): - db = self.db - coll = db.uuid - coll.drop() - uu = uuid.uuid4() - coll.insert_one({"_id": uu, "a": 2}) - coll.insert_one({"_id": uuid.uuid4(), "a": 1}) - - reduce = "function (obj, prev) { prev.count++; }" - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - self.assertEqual([], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual([{"count": 1}], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) - - def test_last_status(self): - # Tests many legacy API elements. - # We must call getlasterror on same socket as the last operation. - db = rs_or_single_client(maxPoolSize=1).pymongo_test - collection = db.test_last_status - collection.remove({}) - collection.save({"i": 1}) - - collection.update({"i": 1}, {"$set": {"i": 2}}, w=0) - # updatedExisting is always false on mongos after an OP_MSG - # unacknowledged write. - if not (client_context.version >= (3, 6) and client_context.is_mongos): - self.assertTrue(db.last_status()["updatedExisting"]) - wait_until(lambda: collection.find_one({"i": 2}), - "found updated w=0 doc") - - collection.update({"i": 1}, {"$set": {"i": 500}}, w=0) - self.assertFalse(db.last_status()["updatedExisting"]) - - def test_auto_ref_and_deref(self): - # Legacy API. - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - - db.test.a.remove({}) - db.test.b.remove({}) - db.test.c.remove({}) - - a = {"hello": u"world"} - db.test.a.save(a) - - b = {"test": a} - db.test.b.save(b) - - c = {"another test": b} - db.test.c.save(c) - - a["hello"] = "mike" - db.test.a.save(a) - - self.assertEqual(db.test.a.find_one(), a) - self.assertEqual(db.test.b.find_one()["test"], a) - self.assertEqual(db.test.c.find_one()["another test"]["test"], a) - self.assertEqual(db.test.b.find_one(), b) - self.assertEqual(db.test.c.find_one()["another test"], b) - self.assertEqual(db.test.c.find_one(), c) - - def test_auto_ref_and_deref_list(self): - # Legacy API. - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - - db.drop_collection("users") - db.drop_collection("messages") - - message_1 = {"title": "foo"} - db.messages.save(message_1) - message_2 = {"title": "bar"} - db.messages.save(message_2) - - user = {"messages": [message_1, message_2]} - db.users.save(user) - db.messages.update(message_1, {"title": "buzz"}) - - self.assertEqual("buzz", db.users.find_one()["messages"][0]["title"]) - self.assertEqual("bar", db.users.find_one()["messages"][1]["title"]) - - def test_object_to_dict_transformer(self): - # PYTHON-709: Some users rely on their custom SONManipulators to run - # before any other checks, so they can insert non-dict objects and - # have them dictified before the _id is inserted or any other - # processing. - # Tests legacy API elements. 
- class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_incoming(self, thing, dummy): - return {'value': thing.value} - - db = self.client.foo - db.add_son_manipulator(ThingTransformer()) - t = Thing('value') - - db.test.remove() - db.test.insert([t]) - out = db.test.find_one() - self.assertEqual('value', out.get('value')) - - def test_son_manipulator_outgoing(self): - class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_outgoing(self, doc, collection): - # We don't want this applied to the command return - # value in pymongo.cursor.Cursor. - if 'value' in doc: - return Thing(doc['value']) - return doc - - db = self.client.foo - db.add_son_manipulator(ThingTransformer()) - - db.test.delete_many({}) - db.test.insert_one({'value': 'value'}) - out = db.test.find_one() - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - out = next(db.test.aggregate([], cursor={})) - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - def test_son_manipulator_inheritance(self): - # Tests legacy API elements. - class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_incoming(self, thing, dummy): - return {'value': thing.value} - - def transform_outgoing(self, son, dummy): - return Thing(son['value']) - - class Child(ThingTransformer): - pass - - db = self.client.foo - db.add_son_manipulator(Child()) - t = Thing('value') - - db.test.remove() - db.test.insert([t]) - out = db.test.find_one() - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - def test_disabling_manipulators(self): - - class IncByTwo(SONManipulator): - def transform_outgoing(self, son, collection): - if 'foo' in son: - son['foo'] += 2 - return son - - db = self.client.pymongo_test - db.add_son_manipulator(IncByTwo()) - c = db.test - c.drop() - c.insert({'foo': 0}) - self.assertEqual(2, c.find_one()['foo']) - self.assertEqual(0, c.find_one(manipulate=False)['foo']) - - self.assertEqual(2, c.find_one(manipulate=True)['foo']) - c.drop() - - def test_manipulator_properties(self): - db = self.client.foo - self.assertEqual([], db.incoming_manipulators) - self.assertEqual([], db.incoming_copying_manipulators) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual([], db.outgoing_copying_manipulators) - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - db.add_son_manipulator(ObjectIdShuffler()) - self.assertEqual(1, len(db.incoming_manipulators)) - self.assertEqual(db.incoming_manipulators, ['NamespaceInjector']) - self.assertEqual(2, len(db.incoming_copying_manipulators)) - for name in db.incoming_copying_manipulators: - self.assertTrue(name in ('ObjectIdShuffler', 'AutoReference')) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual(['AutoReference'], - db.outgoing_copying_manipulators) - - def test_ensure_index(self): - db = self.db - - self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1}) - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, cache_for='foo') - - db.test.drop_indexes() - - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_indexes() - self.assertEqual("foo", - db.test.ensure_index("goodbye", name="foo")) - self.assertEqual(None, 
db.test.ensure_index("goodbye", name="foo")) - - db.test.drop_indexes() - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.drop_collection("test") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye", cache_for=1)) - time.sleep(1.2) - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - # Make sure the expiration time is updated. - self.assertEqual(None, - db.test.ensure_index("goodbye")) - - # Clean up indexes for later tests - db.test.drop_indexes() - - @client_context.require_version_max(4, 1) # PYTHON-1734 - def test_ensure_index_threaded(self): - coll = self.db.threaded_index_creation - index_docs = [] - - class Indexer(threading.Thread): - def run(self): - coll.ensure_index('foo0') - coll.ensure_index('foo1') - coll.ensure_index('foo2') - index_docs.append(coll.index_information()) - - try: - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for thread in threads: - thread.start() - - joinall(threads) - - first = index_docs[0] - for index_doc in index_docs[1:]: - self.assertEqual(index_doc, first) - finally: - coll.drop() - - def test_ensure_purge_index_threaded(self): - coll = self.db.threaded_index_creation - - class Indexer(threading.Thread): - def run(self): - coll.ensure_index('foo') - try: - coll.drop_index('foo') - except OperationFailure: - # The index may have already been dropped. - pass - coll.ensure_index('foo') - coll.drop_indexes() - coll.create_index('foo') - - try: - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for thread in threads: - thread.start() - - joinall(threads) - - self.assertTrue('foo_1' in coll.index_information()) - finally: - coll.drop() - - @client_context.require_version_max(4, 1) # PYTHON-1734 - def test_ensure_unique_index_threaded(self): - coll = self.db.test_unique_threaded - coll.drop() - coll.insert_many([{'foo': i} for i in range(10000)]) - - class Indexer(threading.Thread): - def run(self): - try: - coll.ensure_index('foo', unique=True) - coll.insert_one({'foo': 'bar'}) - coll.insert_one({'foo': 'bar'}) - except OperationFailure: - pass - - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for i in range(10): - threads[i].start() - - joinall(threads) - - self.assertEqual(10001, coll.count()) - coll.drop() - - def test_kill_cursors_with_cursoraddress(self): - coll = self.client.pymongo_test.test - coll.drop() - - coll.insert_many([{'_id': i} for i in range(200)]) - cursor = coll.find().batch_size(1) - next(cursor) - self.client.kill_cursors( - [cursor.cursor_id], - _CursorAddress(self.client.address, coll.full_name)) - - # Prevent killcursors from reaching the server while a getmore is in - # progress -- the server logs "Assertion: 16089:Cannot kill active - # cursor." 
- time.sleep(2) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - - def test_kill_cursors_with_tuple(self): - # Some evergreen distros (Debian 7.1) still test against 3.6.5 where - # OP_KILL_CURSORS does not work. - if (client_context.is_mongos and client_context.auth_enabled and - (3, 6, 0) <= client_context.version < (3, 6, 6)): - raise SkipTest("SERVER-33553 This server version does not support " - "OP_KILL_CURSORS") - - coll = self.client.pymongo_test.test - coll.drop() - - coll.insert_many([{'_id': i} for i in range(200)]) - cursor = coll.find().batch_size(1) - next(cursor) - self.client.kill_cursors( - [cursor.cursor_id], - self.client.address) - - # Prevent killcursors from reaching the server while a getmore is in - # progress -- the server logs "Assertion: 16089:Cannot kill active - # cursor." - time.sleep(2) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - - -class TestLegacyBulk(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulk, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_empty(self): - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(InvalidOperation, bulk.execute) - - def test_find(self): - # find() requires a selector. - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find) - self.assertRaises(TypeError, bulk.find, 'foo') - # No error. - bulk.find({}) - - @client_context.require_version_min(3, 1, 9, -1) - def test_bypass_document_validation_bulk_op(self): - - # Test insert - self.coll.insert_one({"z": 0}) - self.db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - bulk = self.coll.initialize_ordered_bulk_op( - bypass_document_validation=False) - bulk.insert({"z": -1}) # error - self.assertRaises(BulkWriteError, bulk.execute) - self.assertEqual(0, self.coll.count({"z": -1})) - - bulk = self.coll.initialize_ordered_bulk_op( - bypass_document_validation=True) - bulk.insert({"z": -1}) - bulk.execute() - self.assertEqual(1, self.coll.count({"z": -1})) - - self.coll.insert_one({"z": 0}) - self.db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - bulk = self.coll.initialize_unordered_bulk_op( - bypass_document_validation=False) - bulk.insert({"z": -1}) # error - self.assertRaises(BulkWriteError, bulk.execute) - self.assertEqual(1, self.coll.count({"z": -1})) - - bulk = self.coll.initialize_unordered_bulk_op( - bypass_document_validation=True) - bulk.insert({"z": -1}) - bulk.execute() - self.assertEqual(2, self.coll.count({"z": -1})) - self.coll.drop() - - def test_insert(self): - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.insert, 1) - - # find() before insert() is prohibited. - self.assertRaises(AttributeError, lambda: bulk.find({}).insert({})) - - # We don't allow multiple documents per call. 
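- # insert() accepts exactly one document; a list or a generator of documents raises TypeError.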
- self.assertRaises(TypeError, bulk.insert, [{}, {}]) - self.assertRaises(TypeError, bulk.insert, ({} for _ in range(2))) - - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(1, self.coll.count()) - doc = self.coll.find_one() - self.assertTrue(oid_generated_on_process(doc['_id'])) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(2, self.coll.count()) - - def test_insert_check_keys(self): - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'$dollar': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'a.b': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - - def test_update(self): - - expected = { - 'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # update() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.update({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update, 1) - self.assertRaises(ValueError, bulk.find({}).update, {}) - - # All fields must be $-operators. - self.assertRaises(ValueError, bulk.find({}).update, {'foo': 'bar'}) - bulk.find({}).update({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 2) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update(updates) - self.assertRaises(BulkWriteError, bulk.execute) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 2) - - self.coll.insert_one({'x': 1}) - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(1, self.coll.find({'x': 42}).count()) - - # Second time, x is already 42 so nModified is 0. - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 42}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - def test_update_one(self): - - expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # update_one() requires find() first. 
- self.assertRaises( - AttributeError, - lambda: bulk.update_one({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update_one, 1) - self.assertRaises(ValueError, bulk.find({}).update_one, {}) - self.assertRaises(ValueError, bulk.find({}).update_one, {'foo': 'bar'}) - bulk.find({}).update_one({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update_one(updates) - self.assertRaises(BulkWriteError, bulk.execute) - - def test_replace_one(self): - - expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find({}).replace_one, 1) - self.assertRaises(ValueError, - bulk.find({}).replace_one, {'$set': {'foo': 'bar'}}) - bulk.find({}).replace_one({'foo': 'bar'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).replace_one({'bim': 'baz'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - def test_remove(self): - # Test removing all documents, ordered. - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove()) - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 0) - - # Test removing some documents, ordered. - self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.delete_many({}) - - # Test removing all documents, unordered. - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - # Test removing some documents, unordered. 
- self.assertEqual(self.coll.count(), 0) - - self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.delete_many({}) - - def test_remove_one(self): - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove_one() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove_one()) - - # Test removing one document, empty selector. - # First ordered, then unordered. - self.coll.insert_many([{}, {}]) - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 1) - - self.coll.insert_one({}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 1) - - # Test removing one document, with a selector. - # First ordered, then unordered. - self.coll.insert_one({'x': 1}) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) - self.coll.insert_one({'x': 1}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) - - def test_upsert(self): - bulk = self.coll.initialize_ordered_bulk_op() - - # upsert() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.upsert()) - - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}] - } - - bulk.find({}).upsert().replace_one({'foo': 'bar'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update({'$set': {'bim': 'bop'}}) - # Non-upsert, no matches. 
- bulk.find({'x': 1}).update({'$set': {'x': 2}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'bop'}).count(), 1) - self.assertEqual(self.coll.find({'x': 2}).count(), 0) - - def test_upsert_large(self): - big = 'a' * (client_context.client.max_bson_size - 37) - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).upsert().update({'$set': {'s': big}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result) - - self.assertEqual(1, self.coll.find({'x': 1}).count()) - - def test_client_generated_upsert_id(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.find({'_id': 0}).upsert().update_one({'$set': {'a': 0}}) - batch.find({'a': 1}).upsert().replace_one({'_id': 1}) - # This is just here to make the counts right in all cases. - batch.find({'_id': 2}).upsert().replace_one({'_id': 2}) - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': 0}, - {'index': 1, '_id': 1}, - {'index': 2, '_id': 2}]}, - result) - - def test_single_ordered_batch(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}]}, - result) - - def test_single_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_multiple_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 2}}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 4, 'a': 3}) - batch.insert({'b': 5, 'a': 1}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - 
{'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_single_unordered_batch(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - def test_single_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_multiple_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 3}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 4}}) - batch.find({'b': 4}).upsert().update_one({'$set': {'a': 3}}) - batch.insert({'b': 5, 'a': 2}) - batch.insert({'b': 6, 'a': 1}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - # Assume the update at index 1 runs before the update at index 3, - # although the spec does not require it. Same for inserts. 
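- # (Ops 0 and 4 are inserts, ops 1 and 2 upsert; ops 3 and 5 violate the unique index on 'a'.)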
- self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 2, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [ - {'index': 1, '_id': '...'}, - {'index': 2, '_id': '...'}], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 3, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 4}, - 'u': {'$set': {'a': 3}}, - 'multi': False, - 'upsert': True}}, - {'index': 5, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, - result) - - def test_large_inserts_ordered(self): - big = 'x' * self.coll.database.client.max_bson_size - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(1, result['nInserted']) - - self.coll.delete_many({}) - - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() - - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, self.coll.count()) - - def test_large_inserts_unordered(self): - big = 'x' * self.coll.database.client.max_bson_size - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - - self.coll.delete_many({}) - - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() - - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, self.coll.count()) - - def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. - n_docs = 2100 - batch = self.coll.initialize_unordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) - - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - # Same with ordered bulk. 
- self.coll.delete_many({}) - batch = self.coll.initialize_ordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) - - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - def test_multiple_execution(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({}) - batch.execute() - self.assertRaises(InvalidOperation, batch.execute) - - def test_generator_insert(self): - def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} - - result = self.coll.insert_many(gen()) - self.assertEqual(5, len(result.inserted_ids)) - - -class TestLegacyBulkNoResults(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkNoResults, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def tearDown(self): - self.coll.delete_many({}) - - def test_no_results_ordered_success(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - def test_no_results_ordered_failure(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - # Fails with duplicate key error. - batch.insert({'_id': 1}) - # Should not be executed since the batch is ordered. - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 3 == self.coll.count(), - 'insert 3 documents') - self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) - - def test_no_results_unordered_success(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - def test_no_results_unordered_failure(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - # Fails with duplicate key error. - batch.insert({'_id': 1}) - # Should be executed since the batch is unordered. 
- batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - -class TestLegacyBulkWriteConcern(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkWriteConcern, cls).setUpClass() - cls.w = client_context.w - cls.secondary = None - if cls.w > 1: - for member in client_context.ismaster['hosts']: - if member != client_context.ismaster['primary']: - cls.secondary = single_client(*partition_node(member)) - break - - # We tested wtimeout errors by specifying a write concern greater than - # the number of members, but in MongoDB 2.7.8+ this causes a different - # sort of error, "Not enough data-bearing nodes". In recent servers we - # use a failpoint to pause replication on a secondary. - cls.need_replication_stopped = client_context.version.at_least(2, 7, 8) - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def cause_wtimeout(self, batch): - if self.need_replication_stopped: - if not client_context.test_commands_enabled: - raise SkipTest("Test commands must be enabled.") - - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='alwaysOn') - - try: - return batch.execute({'w': self.w, 'wtimeout': 1}) - finally: - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='off') - else: - return batch.execute({'w': self.w + 1, 'wtimeout': 1}) - - def test_fsync_and_j(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - self.assertRaises( - ConfigurationError, - batch.execute, {'fsync': True, 'j': True}) - - @client_context.require_replica_set - def test_write_concern_failure_ordered(self): - # Ensure we don't raise on wnote. - batch = self.coll.initialize_ordered_bulk_op() - batch.find({"something": "that does not exist"}).remove() - self.assertTrue(batch.execute({"w": self.w})) - - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.insert({'a': 2}) - - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': []}, - result) - - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 0) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) - - self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - - # Fail due to write concern support as well - # as duplicate key error on ordered batch.
- batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().replace_one({'b': 1}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [{'index': 1, '_id': '...'}], - 'writeErrors': [ - {'index': 2, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'a': 1}}]}, - result) - - self.assertTrue(len(result['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] - self.assertTrue("duplicate" in failed['errmsg']) - - @client_context.require_replica_set - def test_write_concern_failure_unordered(self): - # Ensure we don't raise on wnote. - batch = self.coll.initialize_unordered_bulk_op() - batch.find({"something": "that does not exist"}).remove() - self.assertTrue(batch.execute({"w": self.w})) - - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, 'b': 1}}) - batch.insert({'a': 2}) - - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(0, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) - - self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - - # Fail due to write concern support as well - # as duplicate key error on unordered batch. - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, - 'b': 1}}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(1, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation.
- self.assertTrue(len(result['writeConcernErrors']) > 1) - - failed = result['writeErrors'][0] - self.assertEqual(2, failed['index']) - self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) - self.assertEqual(1, failed['op']['a']) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) - - upserts = result['upserted'] - self.assertEqual(1, len(upserts)) - self.assertEqual(1, upserts[0]['index']) - self.assertTrue(upserts[0].get('_id')) - - -class TestLegacyBulkAuthorization(BulkAuthorizationTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkAuthorization, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_readonly(self): - # We test that an authorization failure aborts the batch and is raised - # as OperationFailure. - cli = rs_or_single_client_noauth() - db = cli.pymongo_test - coll = db.test - db.authenticate('readonly', 'pw') - bulk = coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - self.assertRaises(OperationFailure, bulk.execute) - - def test_no_remove(self): - # We test that an authorization failure aborts the batch and is raised - # as OperationFailure. - cli = rs_or_single_client_noauth() - db = cli.pymongo_test - coll = db.test - db.authenticate('noremove', 'pw') - bulk = coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - bulk.find({'x': 2}).upsert().replace_one({'x': 2}) - bulk.find({}).remove() # Prohibited. - bulk.insert({'x': 3}) # Never attempted. - self.assertRaises(OperationFailure, bulk.execute) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py new file mode 100644 index 0000000000..0fb877652f --- /dev/null +++ b/test/test_load_balancer.py @@ -0,0 +1,173 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" +from __future__ import annotations + +import gc +import os +import sys +import threading + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import ExceptionCatchingThread, get_pool, rs_client, wait_until + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "load_balancer") + +# Generate unified tests. 
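+# Each JSON spec file in TEST_PATH becomes a unittest TestCase class injected into this module's namespace.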
+globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestLB(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + + def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") + pool = get_pool(self.client) + n_conns = len(pool.conns) + self.db.test.find_one({}) + self.assertEqual(len(pool.conns), n_conns) + list(self.db.test.aggregate([{"$limit": 1}])) + self.assertEqual(len(pool.conns), n_conns) + + @client_context.require_load_balancer + def test_unpin_committed_transaction(self): + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + coll = client[self.db.name].test + with client.start_session() as session: + with session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + coll.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + @client_context.require_failCommand_fail_point + def test_cursor_gc(self): + def create_resource(coll): + cursor = coll.find({}, batch_size=3) + next(cursor) + return cursor + + self._test_no_gc_deadlock(create_resource) + + @client_context.require_failCommand_fail_point + def test_command_cursor_gc(self): + def create_resource(coll): + cursor = coll.aggregate([], batchSize=3) + next(cursor) + return cursor + + self._test_no_gc_deadlock(create_resource) + + def _test_no_gc_deadlock(self, create_resource): + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + coll = client[self.db.name].test + coll.insert_many([{} for _ in range(10)]) + self.assertEqual(pool.active_sockets, 0) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, + } + with self.fail_point(args): + resource = create_resource(coll) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + thread = PoolLocker(pool) + thread.start() + self.assertTrue(thread.locked.wait(5), "timed out") + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() + thread.unlock.set() + thread.join(5) + self.assertFalse(thread.is_alive()) + self.assertIsNone(thread.exc) + + wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + coll.delete_many({}) + + @client_context.require_transactions + def test_session_gc(self): + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + session = client.start_session() + session.start_transaction() + client.test_session_gc.test.find_one({}, session=session) + # Clean up the transaction left open on the server unless we're + # testing serverless, which does not support killSessions. + if not client_context.serverless: + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + thread = PoolLocker(pool) + thread.start() + self.assertTrue(thread.locked.wait(5), "timed out") + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. 
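The deadlock check driving _test_no_gc_deadlock and test_session_gc distills to a small, self-contained pattern: one thread holds the pool's lock while the main thread destroys an object whose finalizer may need that same lock. Below is a minimal standard-library sketch of that pattern; pool_lock and Resource are illustrative stand-ins for the pool's internal lock and the cursor/session, not PyMongo code.

    import gc
    import threading

    pool_lock = threading.Lock()  # Stand-in for the connection pool's lock.

    class Resource:
        """Stand-in for a cursor or session whose finalizer touches the pool."""

        def __del__(self):
            # A deadlock-safe finalizer must not block while the pool is
            # locked: try once, and skip (defer) the cleanup on failure.
            if pool_lock.acquire(blocking=False):
                pool_lock.release()

    locked = threading.Event()
    unlock = threading.Event()

    def hold_pool_lock():
        with pool_lock:
            locked.set()
            assert unlock.wait(10), "timed out waiting for unlock: deadlock?"

    locker = threading.Thread(target=hold_pool_lock, daemon=True)
    locker.start()
    assert locked.wait(5), "timed out"

    resource = Resource()
    del resource   # Finalizer runs here on CPython ...
    gc.collect()   # ... or here on PyPy; neither may block on pool_lock.

    unlock.set()
    locker.join(5)
    assert not locker.is_alive()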
+ del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + thread.unlock.set() + thread.join(5) + self.assertFalse(thread.is_alive()) + self.assertIsNone(thread.exc) + + wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + client[self.db.name].test.delete_many({}) + + +class PoolLocker(ExceptionCatchingThread): + def __init__(self, pool): + super().__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = threading.Event() + self.unlock = threading.Event() + + def lock_pool(self): + with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = self.unlock.wait(10) + if not unlock_pool: + raise Exception("timed out waiting for unlock signal: deadlock?") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index c313be227d..a87cbad587 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -13,6 +13,7 @@ # limitations under the License. """Test maxStalenessSeconds support.""" +from __future__ import annotations import os import sys @@ -21,21 +22,19 @@ sys.path[0:0] = [""] -from pymongo import MongoClient -from pymongo.errors import ConfigurationError -from pymongo.server_selectors import writable_server_selector - from test import client_context, unittest from test.utils import rs_or_single_client from test.utils_selection_tests import create_selection_tests +from pymongo import MongoClient +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'max_staleness') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") -class TestAllScenarios(create_selection_tests(_TEST_PATH)): +class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass @@ -54,26 +53,21 @@ def test_max_staleness(self): with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. 
- MongoClient("mongodb://a/?readPreference=primary&" - "maxStalenessSeconds=120") + MongoClient("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&" - "maxStalenessSeconds=120") + client = MongoClient("mongodb://host/?readPreference=secondary&maxStalenessSeconds=120") self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") @@ -85,15 +79,13 @@ def test_max_staleness(self): def test_max_staleness_float(self): with self.assertRaises(TypeError) as ctx: - rs_or_single_client(maxStalenessSeconds=1.5, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") self.assertIn("must be an integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5" - "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -102,42 +94,42 @@ def test_max_staleness_float(self): def test_max_staleness_zero(self): # Zero is too small. with self.assertRaises(ValueError) as ctx: - rs_or_single_client(maxStalenessSeconds=0, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") self.assertIn("must be a positive integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0" - "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=0&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) self.assertIn("must be a positive integer", str(ctx[0])) - @client_context.require_version_min(3, 3, 6) # SERVER-8858 @client_context.require_replica_set def test_last_write_date(self): # From max-staleness-tests.rst, "Parse lastWriteDate". client = rs_or_single_client(heartbeatFrequencyMS=500) client.pymongo_test.test.insert_one({}) - time.sleep(2) + # Wait for the server description to be updated. + time.sleep(1) server = client._topology.select_server(writable_server_selector) - last_write = server.description.last_write_date - self.assertTrue(last_write) + first = server.description.last_write_date + self.assertTrue(first) + # The first last_write_date may correspond to a internal server write, + # sleep so that the next write does not occur within the same second. 
+ time.sleep(1) client.pymongo_test.test.insert_one({}) - time.sleep(2) + # Wait for the server description to be updated. + time.sleep(1) server = client._topology.select_server(writable_server_selector) - self.assertGreater(server.description.last_write_date, last_write) - self.assertLess(server.description.last_write_date, last_write + 10) - - @client_context.require_version_max(3, 3) - def test_last_write_date_absent(self): - # From max-staleness-tests.rst, "Absent lastWriteDate". - client = rs_or_single_client() - sd = client._topology.select_server(writable_server_selector) - self.assertIsNone(sd.description.last_write_date) + second = server.description.last_write_date + assert first is not None + + assert second is not None + self.assertGreater(second, first) + self.assertLess(second, first + 10) + if __name__ == "__main__": unittest.main() diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 7df2239e7d..a1e2438840 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -13,34 +13,36 @@ # limitations under the License. """Test MongoClient's mongos load balancing using a mock.""" +from __future__ import annotations import sys import threading sys.path[0:0] = [""] +from test import MockClientTest, client_context, unittest +from test.pymongo_mocks import MockClient +from test.utils import connected, wait_until + from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, MockClientTest -from test.pymongo_mocks import MockClient -from test.utils import connected, wait_until @client_context.require_connection +@client_context.require_no_load_balancer def setUpModule(): pass class SimpleOp(threading.Thread): - def __init__(self, client): - super(SimpleOp, self).__init__() + super().__init__() self.client = client self.passed = False def run(self): - self.client.db.command('ismaster') + self.client.db.command("ping") self.passed = True # No exception raised. @@ -57,25 +59,27 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): - return set(server.description.address for server in - topology.select_servers(writable_server_selector)) + return { + server.description.address for server in topology.select_servers(writable_server_selector) + } class TestMongosLoadBalancing(MockClientTest): - def mock_client(self, **kwargs): mock_client = MockClient( standalones=[], members=[], - mongoses=['a:1', 'b:2', 'c:3'], - host='a:1,b:2,c:3', + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", connect=False, - **kwargs) + **kwargs, + ) + self.addCleanup(mock_client.close) # Latencies in seconds. - mock_client.mock_rtts['a:1'] = 0.020 - mock_client.mock_rtts['b:2'] = 0.025 - mock_client.mock_rtts['c:3'] = 0.045 + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 return mock_client def test_lazy_connect(self): @@ -88,30 +92,15 @@ def test_lazy_connect(self): # Trigger initial connection. do_simple_op(client, nthreads) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') - - def test_reconnect(self): - nthreads = 10 - client = connected(self.mock_client()) - - # connected() ensures we've contacted at least one mongos. Wait for - # all of them. - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') - - # Trigger reconnect. 
- client.close() - do_simple_op(client, nthreads) - - wait_until(lambda: len(client.nodes) == 3, - 'reconnect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") def test_failover(self): nthreads = 10 client = connected(self.mock_client(localThresholdMS=0.001)) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Our chosen mongos goes down. - client.kill_host('a:1') + client.kill_host("a:1") # Trigger failover to higher-latency nodes. AutoReconnect should be # raised at most once in each thread. @@ -119,10 +108,10 @@ def test_failover(self): def f(): try: - client.db.command('ismaster') + client.db.command("ping") except AutoReconnect: # Second attempt succeeds. - client.db.command('ismaster') + client.db.command("ping") passed.append(True) @@ -140,35 +129,35 @@ def f(): def test_local_threshold(self): client = connected(self.mock_client(localThresholdMS=30)) - self.assertEqual(30, client.local_threshold_ms) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + self.assertEqual(30, client.options.local_threshold_ms) + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). - self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]), - writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, writable_addresses(topology)) # No error - client.admin.command('ismaster') + client.admin.command("ping") client = connected(self.mock_client(localThresholdMS=0)) - self.assertEqual(0, client.local_threshold_ms) + self.assertEqual(0, client.options.local_threshold_ms) # No error - client.db.command('ismaster') + client.db.command("ping") # Our chosen mongos goes down. - client.kill_host('%s:%s' % next(iter(client.nodes))) + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) try: - client.db.command('ismaster') + client.db.command("ping") except: pass # We eventually connect to a new mongos. def connect_to_new_mongos(): try: - return client.db.command('ismaster') + return client.db.command("ping") except AutoReconnect: pass - wait_until(connect_to_new_mongos, 'connect to a new mongos') + + wait_until(connect_to_new_mongos, "connect to a new mongos") def test_load_balancing(self): # Although the server selection JSON tests already prove that @@ -176,25 +165,25 @@ def test_load_balancing(self): # test of discovering servers' round trip times and configuring # localThresholdMS. client = connected(self.mock_client()) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Prohibited for topology type Sharded. with self.assertRaises(InvalidOperation): client.address topology = client._topology - self.assertEqual(TOPOLOGY_TYPE.Sharded, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([('a', 1), ('b', 2)]), - writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2)}, writable_addresses(topology)) - client.mock_rtts['a:1'] = 0.045 + client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. 
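The window computation behind these assertions: a mongos is eligible only when its average round-trip time is within localThresholdMS of the fastest known mongos, 15 ms by default. A minimal sketch of that rule using the mock RTTs set in mock_client(); writable_within_window is an illustrative helper, not the driver's server-selection code.

    def writable_within_window(rtts_ms, local_threshold_ms=15):
        # Illustrative sketch of the latency-window rule: keep every
        # address whose RTT is within the threshold of the fastest RTT.
        fastest = min(rtts_ms.values())
        return {
            address
            for address, rtt in rtts_ms.items()
            if rtt - fastest <= local_threshold_ms
        }

    # Mock RTTs from mock_client() above, converted to milliseconds.
    rtts = {("a", 1): 20, ("b", 2): 25, ("c", 3): 45}
    assert writable_within_window(rtts) == {("a", 1), ("b", 2)}  # 15 ms default.
    assert writable_within_window(rtts, 30) == {("a", 1), ("b", 2), ("c", 3)}
    # After a's RTT rises to 45 ms, only b remains in the window:
    rtts[("a", 1)] = 45
    assert writable_within_window(rtts) == {("b", 2)}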
- wait_until(lambda: set([('b', 2)]) == writable_addresses(topology), - 'discover server "a" is too far') + wait_until( + lambda: {("b", 2)} == writable_addresses(topology), + 'discover server "a" is too far', + ) if __name__ == "__main__": diff --git a/test/test_monitor.py b/test/test_monitor.py index fe014e34a0..92bcdc49ad 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -13,20 +13,24 @@ # limitations under the License. """Test the monitor module.""" +from __future__ import annotations import gc +import subprocess import sys from functools import partial sys.path[0:0] = [""] -from pymongo.periodic_executor import _EXECUTORS +from test import IntegrationTest, unittest +from test.utils import ( + ServerAndTopologyEventListener, + connected, + single_client, + wait_until, +) -from test import unittest, IntegrationTest -from test.utils import (connected, - ServerAndTopologyEventListener, - single_client, - wait_until) +from pymongo.periodic_executor import _EXECUTORS def unregistered(ref): @@ -38,6 +42,7 @@ def get_executors(client): executors = [] for server in client._topology._servers.values(): executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) executors.append(client._kill_cursors_executor) executors.append(client._topology._Topology__events_executor) return [e for e in executors if e is not None] @@ -54,31 +59,37 @@ class TestMonitor(IntegrationTest): def test_cleanup_executors_on_client_del(self): client = create_client() executors = get_executors(client) - self.assertEqual(len(executors), 3) + self.assertEqual(len(executors), 4) # Each executor stores a weakref to itself in _EXECUTORS. - executor_refs = [ - (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] del executors del client for ref, name in executor_refs: - wait_until(partial(unregistered, ref), - 'unregister executor: %s' % (name,), - timeout=5) + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) def test_cleanup_executors_on_client_close(self): client = create_client() executors = get_executors(client) - self.assertEqual(len(executors), 3) + self.assertEqual(len(executors), 4) client.close() for executor in executors: - wait_until(lambda: executor._stopped, - 'closed executor: %s' % (executor._name,), - timeout=5) + wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) + + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the MongoClient spawns a new thread + on process shutdown.""" + command = [sys.executable, "-c", "from pymongo import MongoClient; c = MongoClient()"] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 2e16e1c9a4..6880a30dc7 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -11,149 +11,131 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
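The listener changes throughout this file swap the old results dict for started_events, succeeded_events, and failed_events lists. Both shapes sit on top of PyMongo's command monitoring API; a minimal listener in that style, sketching the pattern rather than reproducing the test suite's EventListener helper:

    from pymongo import MongoClient, monitoring

    class RecordingListener(monitoring.CommandListener):
        """Record each command phase so tests can assert on it later."""

        def __init__(self):
            self.started_events = []
            self.succeeded_events = []
            self.failed_events = []

        def started(self, event):
            self.started_events.append(event)

        def succeeded(self, event):
            self.succeeded_events.append(event)

        def failed(self, event):
            self.failed_events.append(event)

    listener = RecordingListener()
    client = MongoClient(event_listeners=[listener])
    client.admin.command("ping")
    assert listener.started_events[0].command_name == "ping"
    assert listener.succeeded_events[0].reply.get("ok") == 1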
+from __future__ import annotations import copy import datetime import sys import time -import warnings +from typing import Any sys.path[0:0] = [""] +from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest +from test.utils import EventListener, rs_or_single_client, single_client, wait_until + +from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.py3compat import text_type from bson.son import SON -from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring from pymongo.command_cursor import CommandCursor -from pymongo.errors import NotMasterError, OperationFailure +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - PyMongoTestCase, - sanitize_cmd, - unittest) -from test.utils import (EventListener, - rs_or_single_client, - single_client, - wait_until) -class TestCommandMonitoring(PyMongoTestCase): +class TestCommandMonitoring(IntegrationTest): + listener: EventListener @classmethod @client_context.require_connection def setUpClass(cls): + super().setUpClass() cls.listener = EventListener() - cls.client = rs_or_single_client( - event_listeners=[cls.listener], - retryWrites=False) + cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) + + @classmethod + def tearDownClass(cls): + cls.client.close() + super().tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() + super().tearDown() def test_started_simple(self): - self.client.pymongo_test.command('ismaster') - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ismaster', 1)]), started.command) - self.assertEqual('ismaster', started.command_name) + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_succeeded_simple(self): - self.client.pymongo_test.command('ismaster') - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertEqual('ismaster', succeeded.command_name) + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, 
monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertEqual("ping", succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) - self.assertEqual(1, succeeded.reply.get('ok')) + self.assertEqual(1, succeeded.reply.get("ok")) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertTrue(isinstance(succeeded.duration_micros, int)) def test_failed_simple(self): try: - self.client.pymongo_test.command('oops!') + self.client.pymongo_test.command("oops!") except OperationFailure: pass - results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('oops!', failed.command_name) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("oops!", failed.command_name) self.assertEqual(self.client.address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) def test_find_one(self): self.client.pymongo_test.test.find_one() - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('limit', 1), - ('singleBatch', True)]), - started.command) - self.assertEqual('find', started.command_name) + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_find_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) - self.listener.results.clear() - cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=4) + self.listener.reset() + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, 
len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 4)]), - started.command) - self.assertEqual('find', started.command_name) + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -161,30 +143,26 @@ def test_find_and_get_more(self): self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) - self.listener.results.clear() + self.listener.reset() # Next batch. Exhausting the cursor could cause a getMore # that returns id of 0 and no results. next(cursor) try: - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -196,32 +174,27 @@ def test_find_and_get_more(self): tuple(cursor) def test_find_with_explain(self): - cmd = SON([('explain', SON([('find', 'test'), - ('filter', {})]))]) + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. 
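On the "unwrapped command" noted above: against a mongos, a non-primary read preference historically caused the driver to wrap the command in an OP_QUERY envelope with $query/$readPreference, while monitoring events publish the inner command. An illustration of the two shapes; the literal dicts are examples, not captured wire traffic:

    # What may go over the wire to mongos with primaryPreferred (OP_QUERY era):
    wire_command = {
        "$query": {"find": "test", "filter": {}},
        "$readPreference": {"mode": "primaryPreferred"},
    }
    # What CommandStartedEvent.command reports: the unwrapped inner command.
    published = wire_command["$query"]
    assert published == {"find": "test", "filter": {}}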
if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(cmd, started.command) - self.assertEqual('explain', started.command_name) + self.assertEqual("explain", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('explain', succeeded.command_name) + self.assertEqual("explain", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -229,34 +202,30 @@ def test_find_with_explain(self): def _test_find_options(self, query, expected_cmd): coll = self.client.pymongo_test.test coll.drop() - coll.create_index('x') - coll.insert_many([{'x': i} for i in range(5)]) + coll.create_index("x") + coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. 
- self.listener.results.clear() + self.listener.reset() if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) cursor = coll.find(**query) next(cursor) try: - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(expected_cmd, started.command) - self.assertEqual('find', started.command_name) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) finally: @@ -264,125 +233,126 @@ def _test_find_options(self, query, expected_cmd): tuple(cursor) def test_find_options(self): - query = dict(filter={}, - hint=[('x', 1)], - max_time_ms=10000, - max={'x': 10}, - min={'x': -10}, - return_key=True, - show_record_id=True, - projection={'x': False}, - skip=1, - no_cursor_timeout=True, - sort=[('_id', 1)], - allow_partial_results=True, - comment='this is a test', - batch_size=2) - - cmd = dict(find='test', - filter={}, - hint=SON([('x', 1)]), - comment='this is a test', - maxTimeMS=10000, - max={'x': 10}, - min={'x': -10}, - returnKey=True, - showRecordId=True, - sort=SON([('_id', 1)]), - projection={'x': False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True) + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } + + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } if client_context.version < (4, 1, 0, -1): - query['max_scan'] = 10 - cmd['maxScan'] = 10 + query["max_scan"] = 10 + cmd["maxScan"] = 10 self._test_find_options(query, cmd) @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". 
- query = dict(filter={}, - snapshot=True) + query = {"filter": {}, "snapshot": True} - cmd = dict(find='test', - filter={}, - snapshot=True) + cmd = {"find": "test", "filter": {}, "snapshot": True} self._test_find_options(query, cmd) def test_command_and_get_more(self): self.client.pymongo_test.test.drop() - self.client.pymongo_test.test.insert_many( - [{'x': 1} for _ in range(10)]) - self.listener.results.clear() + self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) - cursor = coll.aggregate( - [{'$project': {'_id': False, 'x': 1}}], batchSize=4) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('aggregate', 'test'), - ('pipeline', [{'$project': {'_id': False, 'x': 1}}]), - ('cursor', {'batchSize': 4})]), - started.command) - self.assertEqual('aggregate', started.command_name) + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('aggregate', succeeded.command_name) + self.assertEqual("aggregate", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) - expected_cursor = {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{'x': 1} for _ in range(4)]} - self.assertEqualCommand(expected_cursor, succeeded.reply.get('cursor')) - - self.listener.results.clear() + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) + + self.listener.reset() next(cursor) try: - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), 
- started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'nextBatch': [{'x': 1} for _ in range(4)]}, - 'ok': 1.0} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } self.assertEqualReply(expected_result, succeeded.reply) finally: # Exhaust the cursor to avoid kill cursors. @@ -391,58 +361,52 @@ def test_command_and_get_more(self): def test_get_more_failure(self): address = self.client.address coll = self.client.pymongo_test.test - cursor_doc = {"id": 12345, "firstBatch": [], "ns": coll.full_name} + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} cursor = CommandCursor(coll, cursor_doc, address) try: next(cursor) except Exception: pass - results = self.listener.results - started = results['started'][0] - self.assertEqual(0, len(results['succeeded'])) - failed = results['failed'][0] - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', 12345), - ('collection', 'test')]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertTrue(isinstance(failed.duration_micros, int)) - self.assertEqual('getMore', failed.command_name) + self.assertEqual("getMore", failed.command_name) self.assertTrue(isinstance(failed.request_id, int)) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @client_context.require_replica_set @client_context.require_secondaries_count(1) - def test_not_master_error(self): + def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. 
- client.admin.command('ismaster') - self.listener.results.clear() + client.admin.command("ping") + self.listener.reset() error = None try: client.pymongo_test.test.find_one_and_delete({}) - except NotMasterError as exc: + except NotPrimaryError as exc: error = exc.errors - results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('findAndModify', failed.command_name) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) self.assertEqual(error, failed.failure) @@ -450,72 +414,63 @@ def test_not_master_error(self): @client_context.require_no_mongos def test_exhaust(self): self.client.pymongo_test.test.drop() - self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) - self.listener.results.clear() + self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) + self.listener.reset() cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=5, - cursor_type=CursorType.EXHAUST) + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 5)]), - started.command) - self.assertEqual('find', started.command_name) + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{} for _ in range(5)]}, - 'ok': 1} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 
1, + } self.assertEqualReply(expected_result, succeeded.reply) - self.listener.results.clear() + self.listener.reset() tuple(cursor) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 5)]), - started.command) - self.assertEqual('getMore', started.command_name) - self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) - self.assertEqual(cursor.address, succeeded.connection_id) - expected_result = { - 'cursor': {'id': 0, - 'ns': 'pymongo_test.test', - 'nextBatch': [{} for _ in range(5)]}, - 'ok': 1} - self.assertEqualReply(expected_result, succeeded.reply) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual("pymongo_test", event.database_name) + self.assertTrue(isinstance(event.request_id, int)) + for event in self.listener.succeeded_events: + self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(event.duration_micros, int)) + self.assertEqual("getMore", event.command_name) + self.assertTrue(isinstance(event.request_id, int)) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -524,53 +479,55 @@ def test_kill_cursors(self): cursor = self.client.pymongo_test.test.find().batch_size(5) next(cursor) cursor_id = cursor.cursor_id - self.listener.results.clear() + self.listener.reset() cursor.close() time.sleep(2) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) # There could be more than one cursor_id here depending on # when the thread last ran. 
- self.assertIn(cursor_id, started.command['cursors']) - self.assertEqual('killCursors', started.command_name) + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('killCursors', succeeded.command_name) + self.assertEqual("killCursors", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertTrue(cursor_id in succeeded.reply['cursorsUnknown'] - or cursor_id in succeeded.reply['cursorsKilled']) + self.assertTrue( + cursor_id in succeeded.reply["cursorsUnknown"] + or cursor_id in succeeded.reply["cursorsKilled"] + ) def test_non_bulk_writes(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() # Implied write concern insert_one - res = coll.insert_one({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}])]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -579,25 +536,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=0)) - res = coll.insert_one({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = 
SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 0})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -605,24 +565,27 @@ def test_non_bulk_writes(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertEqualReply(succeeded.reply, {'ok': 1}) + self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=1)) - res = coll.insert_one({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -631,25 +594,27 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_many - self.listener.results.clear() - res = coll.delete_many({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + self.listener.reset() + res = coll.delete_many({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 1}), - ('limit', 0)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + 
("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -658,28 +623,40 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(res.deleted_count, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) # replace_one - self.listener.results.clear() + self.listener.reset() oid = ObjectId() - res = coll.replace_one({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': oid}), - ('u', {'_id': oid, 'x': 1}), - ('multi', False), - ('upsert', True)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -688,28 +665,40 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one - self.listener.results.clear() - res = coll.update_one({'x': 1}, {'$inc': {'x': 1}}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + self.listener.reset() + res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 1}), - ('u', {'$inc': {'x': 1}}), - ('multi', False), - 
('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -718,27 +707,39 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # update_many - self.listener.results.clear() - res = coll.update_many({'x': 2}, {'$inc': {'x': 1}}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + self.listener.reset() + res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 2}), - ('u', {'$inc': {'x': 1}}), - ('multi', True), - ('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -747,25 +748,27 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_one - self.listener.results.clear() - res = coll.delete_one({'x': 3}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + self.listener.reset() + _ = coll.delete_one({"x": 3}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 3}), - ('limit', 1)])]), - ('writeConcern', {'w': 1})]) + expected = SON( 
+ [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -774,30 +777,33 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) self.assertEqual(0, coll.count_documents({})) # write errors - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) try: - self.listener.results.clear() - coll.insert_one({'_id': 1}) + self.listener.reset() + coll.insert_one({"_id": 1}) except OperationFailure: pass - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -806,252 +812,27 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(0, reply.get('n')) - errors = reply.get('writeErrors') + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") self.assertIsInstance(errors, list) error = errors[0] - self.assertEqual(0, error.get('index')) - self.assertIsInstance(error.get('code'), int) - self.assertIsInstance(error.get('errmsg'), text_type) - - def test_legacy_writes(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - - coll = self.client.pymongo_test.test - coll.drop() - self.listener.results.clear() - - # Implied write concern insert - _id = coll.insert({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}])]) - self.assertEqualCommand(expected, 
started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # Unacknowledged insert - self.listener.results.clear() - _id = coll.insert({'x': 1}, w=0) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}]), - ('writeConcern', {'w': 0})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertEqual(succeeded.reply, {'ok': 1}) - - # Explicit write concern insert - self.listener.results.clear() - _id = coll.insert({'x': 1}, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # remove all - self.listener.results.clear() - res = coll.remove({'x': 1}, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 1}), - ('limit', 0)])]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) - self.assertIsInstance(started.request_id, int) 
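The started/succeeded/failed bookkeeping asserted throughout this file comes from PyMongo's command-monitoring hooks. A minimal, self-contained sketch of the listener pattern these tests exercise; the class name RecordingListener and the localhost URI are illustrative, not part of the patch:

    from pymongo import MongoClient, monitoring

    class RecordingListener(monitoring.CommandListener):
        """Collect every command event so assertions can inspect it later."""

        def __init__(self):
            self.started_events = []
            self.succeeded_events = []
            self.failed_events = []

        def started(self, event):
            self.started_events.append(event)

        def succeeded(self, event):
            self.succeeded_events.append(event)

        def failed(self, event):
            self.failed_events.append(event)

        def reset(self):
            self.started_events.clear()
            self.succeeded_events.clear()
            self.failed_events.clear()

    listener = RecordingListener()
    client = MongoClient("mongodb://localhost:27017", event_listeners=[listener])
    client.admin.command("ping")
    assert listener.started_events[0].command_name == "ping"
    assert listener.succeeded_events[0].reply["ok"] == 1

This mirrors the reset()/started_events API that replaces the old results dict throughout the hunks that follow.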
- self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(res['n'], reply.get('n')) - - # upsert - self.listener.results.clear() - oid = ObjectId() - coll.update({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': oid}), - ('u', {'_id': oid, 'x': 1}), - ('multi', False), - ('upsert', True)])]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted')) - - # update one - self.listener.results.clear() - coll.update({'x': 1}, {'$inc': {'x': 1}}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 1}), - ('u', {'$inc': {'x': 1}}), - ('multi', False), - ('upsert', False)])])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # update many - self.listener.results.clear() - coll.update({'x': 2}, {'$inc': {'x': 1}}, multi=True) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 2}), - ('u', {'$inc': {'x': 1}}), - ('multi', True), - ('upsert', False)])])]) - self.assertEqualCommand(expected, started.command) - 
self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # remove one - self.listener.results.clear() - coll.remove({'x': 3}, multi=False) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 3}), - ('limit', 1)])])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - self.assertEqual(0, coll.count_documents({})) + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) def test_insert_many(self): # This always uses the bulk API. 
coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() - big = 'x' * (1024 * 1024 * 4) - docs = [{'_id': i, 'big': big} for i in range(6)] + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) - results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] count = 0 operation_id = started[0].operation_id @@ -1059,13 +840,12 @@ def test_insert_many(self): for start, succeed in zip(started, succeeded): self.assertIsInstance(start, monitoring.CommandStartedEvent) cmd = sanitize_cmd(start.command) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -1076,75 +856,71 @@ def test_insert_many(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) reply = succeed.reply - self.assertEqual(1, reply.get('ok')) - count += reply.get('n', 0) + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) self.assertEqual(documents, docs) self.assertEqual(6, count) - def test_legacy_insert_many(self): - # On legacy servers this uses bulk OP_INSERT. - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - - coll = self.client.pymongo_test.test - coll.drop() - self.listener.results.clear() - - # Force two batches on legacy servers. 
- big = 'x' * (1024 * 1024 * 12) - docs = [{'_id': i, 'big': big} for i in range(6)] - coll.insert(docs) - results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) - documents = [] - count = 0 - operation_id = started[0].operation_id - self.assertIsInstance(operation_id, int) - for start, succeed in zip(started, succeeded): - self.assertIsInstance(start, monitoring.CommandStartedEvent) - cmd = sanitize_cmd(start.command) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) - self.assertIsInstance(start.request_id, int) - self.assertEqual(self.client.address, start.connection_id) - self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeed.duration_micros, int) - self.assertEqual(start.command_name, succeed.command_name) - self.assertEqual(start.request_id, succeed.request_id) - self.assertEqual(start.connection_id, succeed.connection_id) - self.assertEqual(start.operation_id, operation_id) - self.assertEqual(succeed.operation_id, operation_id) - reply = succeed.reply - self.assertEqual(1, reply.get('ok')) - count += reply.get('n', 0) - self.assertEqual(documents, docs) - self.assertEqual(6, count) + def test_insert_many_unacknowledged(self): + coll = self.client.pymongo_test.test + coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.reset() + + # Force two batches on legacy servers. + big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] + unack_coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get("ok")) + self.assertEqual(documents, docs) + wait_until(lambda: coll.count_documents({}) == 6, "insert documents with w=0") def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() - - coll.bulk_write([InsertOne({'_id': 1}), - UpdateOne({'_id': 1}, {'$set': {'x': 1}}), - DeleteOne({'_id': 1})]) - results = self.listener.results - started = results['started'] - succeeded = 
results['succeeded'] - self.assertEqual(0, len(results['failed'])) + self.listener.reset() + + coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -1155,46 +931,112 @@ def test_bulk_write(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}])]) + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) self.assertEqualCommand(expected, started[0].command) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': 1}), - ('u', {'$set': {'x': 1}}), - ('multi', False), - ('upsert', False)])])]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) self.assertEqualCommand(expected, started[1].command) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'_id': 1}), - ('limit', 1)])])]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) self.assertEqualCommand(expected, started[2].command) + @client_context.require_failCommand_fail_point + def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_network_error = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + }, + } + with self.fail_point(insert_network_error): + with self.assertRaises(AutoReconnect): + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["errtype"], "AutoReconnect") + self.assertTrue(event.failure["errmsg"]) + + @client_context.require_failCommand_fail_point + def test_bulk_write_command_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_command_error = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": False, + "errorCode": 10107, # Not primary + }, + } + with self.fail_point(insert_command_error): + with self.assertRaises(NotPrimaryError): + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["code"], 10107) + self.assertTrue(event.failure["errmsg"]) + def test_write_errors(self): 
coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() try: - coll.bulk_write([InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - DeleteOne({'_id': 1})], - ordered=False) + coll.bulk_write( + [ + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + DeleteOne({"_id": 1}), + ], + ordered=False, + ) except OperationFailure: pass - results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -1204,28 +1046,27 @@ def test_write_errors(self): self.assertEqual(start.connection_id, succeed.connection_id) self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - if 'writeErrors' in succeed.reply: - errors.extend(succeed.reply['writeErrors']) + if "writeErrors" in succeed.reply: + errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(['index', 'code', 'errmsg']) + fields = {"index", "code", "errmsg"} for error in errors: self.assertTrue(fields.issubset(set(error))) def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch # this test should still pass. 
- self.listener.results.clear() + self.listener.reset() tuple(self.client.pymongo_test.test.list_indexes()) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('listIndexes', 'test'), ('cursor', {})]) + expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('listIndexes', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -1233,73 +1074,34 @@ def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('cursor' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) - - self.listener.results.clear() - self.client.pymongo_test.current_op(True) - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('currentOp', 1), ('$all', True)]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('admin', started.database_name) - self.assertEqual('currentOp', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('inprog' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) - - if not client_context.is_mongos: - self.client.fsync(lock=True) - self.listener.results.clear() - self.client.unlock() - # Wait for async unlock... 
- wait_until( - lambda: not self.client.is_locked, "unlock the database") - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = {'fsyncUnlock': 1} - self.assertEqualCommand(expected, started.command) - self.assertEqual('admin', started.database_name) - self.assertEqual('fsyncUnlock', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('info' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) + self.assertTrue("cursor" in succeeded.reply) + self.assertTrue("ok" in succeeded.reply) + self.listener.reset() + + @client_context.require_version_max(6, 1, 99) def test_sensitive_commands(self): listeners = self.client._event_listeners - self.listener.results.clear() + self.listener.reset() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start( - cmd, "pymongo_test", 12345, self.client.address) + listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) # type: ignore[arg-type] delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( - delta, {'nonce': 'e474f4561c5eb40b', 'ok': 1.0}, - "getnonce", 12345, self.client.address) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + self.client.address, # type: ignore[arg-type] + database_name="pymongo_test", + ) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getnonce', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -1310,42 +1112,138 @@ def test_sensitive_commands(self): self.assertEqual({}, succeeded.reply) -class TestGlobalListener(PyMongoTestCase): +class TestGlobalListener(IntegrationTest): + listener: EventListener + saved_listeners: Any @classmethod @client_context.require_connection def setUpClass(cls): + super().setUpClass() cls.listener = EventListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) cls.client = single_client() # Get one (authenticated) socket in the pool. 
- cls.client.pymongo_test.command('ismaster') + cls.client.pymongo_test.command("ping") @@classmethod def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners + cls.client.close() + super().tearDownClass() def setUp(self): - self.listener.results.clear() + super().setUp() + self.listener.reset() def test_simple(self): - self.client.pymongo_test.command('ismaster') - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ismaster', 1)]), started.command) - self.assertEqual('ismaster', started.command_name) + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) +class TestEventClasses(unittest.TestCase): + def test_command_event_repr(self): + request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin" + event = monitoring.CommandStartedEvent( + {"ping": 1}, db_name, request_id, connection_id, operation_id + ) + self.assertEqual( + repr(event), + "<CommandStartedEvent ('localhost', 27017) db: 'admin', command: 'ping', operation_id: 2, service_id: None>", + ) + delta = datetime.timedelta(milliseconds=100) + event = monitoring.CommandSucceededEvent( + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "<CommandSucceededEvent ('localhost', 27017) db: 'admin', command: 'ping', operation_id: 2, duration_micros: 100000, service_id: None>", + ) + event = monitoring.CommandFailedEvent( + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "<CommandFailedEvent ('localhost', 27017) db: 'admin', command: 'ping', operation_id: 2, duration_micros: 100000, failure: {'ok': 0}, service_id: None>", + ) + + def test_server_heartbeat_event_repr(self): + connection_id = ("localhost", 27017) + event = monitoring.ServerHeartbeatStartedEvent(connection_id) + self.assertEqual( + repr(event), "<ServerHeartbeatStartedEvent ('localhost', 27017)>" + ) + delta = 0.1 + event = monitoring.ServerHeartbeatSucceededEvent( + delta, {"ok": 1}, connection_id # type: ignore[arg-type] + ) + self.assertEqual( + repr(event), + "<ServerHeartbeatSucceededEvent ('localhost', 27017) duration: 0.1, awaited: False, reply: {'ok': 1}>", + ) + event = monitoring.ServerHeartbeatFailedEvent( + delta, "ERROR", connection_id # type: ignore[arg-type] + ) + self.assertEqual( + repr(event), + "<ServerHeartbeatFailedEvent ('localhost', 27017) duration: 0.1, awaited: False, reply: ERROR>", + ) + + def test_server_event_repr(self): + server_address = ("localhost", 27017) + topology_id = ObjectId("000000000000000000000001") + event = monitoring.ServerOpeningEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "<ServerOpeningEvent ('localhost', 27017) topology_id: 000000000000000000000001>", + ) + event = monitoring.ServerDescriptionChangedEvent( + "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] + ) + self.assertEqual( + repr(event), + "<ServerDescriptionChangedEvent ('localhost', 27017) changed from: PREV, to: NEW>", + ) + event = monitoring.ServerClosedEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "<ServerClosedEvent ('localhost', 27017) topology_id: 000000000000000000000001>", + ) + + def test_topology_event_repr(self): + topology_id = ObjectId("000000000000000000000001") + event = monitoring.TopologyOpenedEvent(topology_id) + self.assertEqual(repr(event), "<TopologyOpenedEvent topology_id: 000000000000000000000001>") + event = monitoring.TopologyDescriptionChangedEvent( + "PREV", "NEW", topology_id # type:
ignore[arg-type] + ) + self.assertEqual( + repr(event), + "<TopologyDescriptionChangedEvent topology_id: 000000000000000000000001 changed from: PREV, to: NEW>", + ) + event = monitoring.TopologyClosedEvent(topology_id) + self.assertEqual(repr(event), "<TopologyClosedEvent topology_id: 000000000000000000000001>") + + if __name__ == "__main__": unittest.main() diff --git a/test/test_objectid.py b/test/test_objectid.py index 4e66b67fab..771ba09422 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -13,6 +13,7 @@ # limitations under the License. """Tests for the objectid module.""" +from __future__ import annotations import datetime import pickle @@ -21,14 +22,13 @@ sys.path[0:0] = [""] -from bson.errors import InvalidId -from bson.objectid import ObjectId, _MAX_COUNTER_VALUE -from bson.py3compat import PY3, _unicode -from bson.tz_util import (FixedOffset, - utc) from test import SkipTest, unittest from test.utils import oid_generated_on_process +from bson.errors import InvalidId +from bson.objectid import _MAX_COUNTER_VALUE, ObjectId +from bson.tz_util import FixedOffset, utc + def oid(x): return ObjectId() @@ -50,40 +50,36 @@ def test_creation(self): def test_unicode(self): a = ObjectId() - self.assertEqual(a, ObjectId(_unicode(a))) - self.assertEqual(ObjectId("123456789012123456789012"), - ObjectId(u"123456789012123456789012")) - self.assertRaises(InvalidId, ObjectId, u"hello") + self.assertEqual(a, ObjectId(a)) + self.assertRaises(InvalidId, ObjectId, "hello") def test_from_hex(self): ObjectId("123456789012123456789012") self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12") - self.assertRaises(InvalidId, ObjectId, u"123456789012123456789G12") def test_repr_str(self): - self.assertEqual(repr(ObjectId("1234567890abcdef12345678")), - "ObjectId('1234567890abcdef12345678')") - self.assertEqual(str(ObjectId("1234567890abcdef12345678")), - "1234567890abcdef12345678") - self.assertEqual(str(ObjectId(b"123456789012")), - "313233343536373839303132") - self.assertEqual(ObjectId("1234567890abcdef12345678").binary, - b'\x124Vx\x90\xab\xcd\xef\x124Vx') - self.assertEqual(str(ObjectId(b'\x124Vx\x90\xab\xcd\xef\x124Vx')), - "1234567890abcdef12345678") + self.assertEqual( + repr(ObjectId("1234567890abcdef12345678")), "ObjectId('1234567890abcdef12345678')" + ) + self.assertEqual(str(ObjectId("1234567890abcdef12345678")), "1234567890abcdef12345678") + self.assertEqual(str(ObjectId(b"123456789012")), "313233343536373839303132") + self.assertEqual( + ObjectId("1234567890abcdef12345678").binary, b"\x124Vx\x90\xab\xcd\xef\x124Vx" + ) + self.assertEqual( + str(ObjectId(b"\x124Vx\x90\xab\xcd\xef\x124Vx")), "1234567890abcdef12345678" + ) def test_equality(self): a = ObjectId() self.assertEqual(a, ObjectId(a)) - self.assertEqual(ObjectId(b"123456789012"), - ObjectId(b"123456789012")) + self.assertEqual(ObjectId(b"123456789012"), ObjectId(b"123456789012")) self.assertNotEqual(ObjectId(), ObjectId()) self.assertNotEqual(ObjectId(b"123456789012"), b"123456789012") # Explicitly test inequality self.assertFalse(a != ObjectId(a)) - self.assertFalse(ObjectId(b"123456789012") != - ObjectId(b"123456789012")) + self.assertFalse(ObjectId(b"123456789012") != ObjectId(b"123456789012")) def test_binary_str_equivalence(self): a = ObjectId() @@ -91,7 +87,7 @@ def test_binary_str_equivalence(self): self.assertEqual(a, ObjectId(str(a))) def test_generation_time(self): - d1 = datetime.datetime.utcnow() + d1 = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d2 = ObjectId().generation_time self.assertEqual(utc, d2.tzinfo) @@ -99,18 +95,19 @@ self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) def
test_from_datetime(self): - if 'PyPy 1.8.0' in sys.version: + if "PyPy 1.8.0" in sys.version: # See https://bugs.pypy.org/issue1092 raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") - d = datetime.datetime.utcnow() + d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) self.assertEqual("0" * 16, str(oid)[8:]) - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) + offset = aware.utcoffset() + assert offset is not None + as_utc = (aware - offset).replace(tzinfo=utc) oid = ObjectId.from_datetime(aware) self.assertEqual(as_utc, oid.generation_time) @@ -128,7 +125,8 @@ def test_pickle_backwards_compatability(self): b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" b"(dp5\nS'_ObjectId__id'\np6\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb." + ) # We also test against a hardcoded "New" pickle format so that we # make sure we're backward compatible with the current version in @@ -137,15 +135,12 @@ def test_pickle_backwards_compatability(self): b"ccopy_reg\n_reconstructor\np0\n" b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." + ) - if PY3: - # Have to load using 'latin-1' since these were pickled in python2.x. - oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') - oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') - else: - oid_1_9 = pickle.loads(pickled_with_1_9) - oid_1_10 = pickle.loads(pickled_with_1_10) + # Have to load using 'latin-1' since these were pickled in python2.x. + oid_1_9 = pickle.loads(pickled_with_1_9, encoding="latin-1") + oid_1_10 = pickle.loads(pickled_with_1_10, encoding="latin-1") self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) self.assertEqual(oid_1_9, oid_1_10) @@ -189,9 +184,13 @@ def generate_objectid_with_timestamp(timestamp): for tstamp, exp_datetime_args in TEST_DATA.items(): oid = generate_objectid_with_timestamp(tstamp) - self.assertEqual( - oid.generation_time, - datetime.datetime(*exp_datetime_args, tzinfo=utc)) + # 32-bit platforms may overflow in datetime.fromtimestamp. + if tstamp > 0x7FFFFFFF and sys.maxsize < 2**32: + try: + oid.generation_time + except (OverflowError, ValueError): + continue + self.assertEqual(oid.generation_time, datetime.datetime(*exp_datetime_args, tzinfo=utc)) def test_random_regenerated_on_pid_change(self): # Test that change of pid triggers new random number generation. diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py new file mode 100644 index 0000000000..1cc025ccb2 --- /dev/null +++ b/test/test_ocsp_cache.py @@ -0,0 +1,138 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
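The generation_time assertions above rely on an ObjectId embedding its creation time in its first four bytes. A standalone illustration of what they decode (not part of the patch):

    import datetime
    from bson.objectid import ObjectId

    dt = datetime.datetime(2022, 1, 1, tzinfo=datetime.timezone.utc)
    oid = ObjectId.from_datetime(dt)

    # The first four big-endian bytes of an ObjectId are its epoch seconds.
    assert int.from_bytes(oid.binary[:4], "big") == int(dt.timestamp())
    # generation_time is a timezone-aware UTC datetime.
    assert oid.generation_time == dt
    # from_datetime() zeroes the non-timestamp bytes, as test_from_datetime asserts.
    assert str(oid)[8:] == "0" * 16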
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the pymongo ocsp_support module.""" +from __future__ import annotations + +import random +import sys +from collections import namedtuple +from datetime import datetime, timedelta, timezone +from os import urandom +from time import sleep +from typing import Any + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.ocsp_cache import _OCSPCache + + +class TestOcspCache(unittest.TestCase): + MockHashAlgorithm: Any + MockOcspRequest: Any + MockOcspResponse: Any + + @classmethod + def setUpClass(cls): + cls.MockHashAlgorithm = namedtuple("MockHashAlgorithm", ["name"]) # type: ignore + cls.MockOcspRequest = namedtuple( # type: ignore + "MockOcspRequest", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) + cls.MockOcspResponse = namedtuple( # type: ignore + "MockOcspResponse", ["this_update", "next_update"] + ) + + def setUp(self): + self.cache = _OCSPCache() + + def _create_mock_request(self): + hash_algorithm = self.MockHashAlgorithm(random.choice(["sha1", "md5", "sha256"])) + issuer_name_hash = urandom(8) + issuer_key_hash = urandom(8) + serial_number = random.randint(0, 10**10) + return self.MockOcspRequest( + hash_algorithm=hash_algorithm, + issuer_name_hash=issuer_name_hash, + issuer_key_hash=issuer_key_hash, + serial_number=serial_number, + ) + + def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): + now = datetime.now(tz=timezone.utc).replace(tzinfo=None) + this_update = now + timedelta(seconds=this_update_delta_seconds) + if next_update_delta_seconds is not None: + next_update = now + timedelta(seconds=next_update_delta_seconds) + else: + next_update = None + return self.MockOcspResponse(this_update=this_update, next_update=next_update) + + def _add_mock_cache_entry(self, mock_request, mock_response): + key = self.cache._get_cache_key(mock_request) + self.cache._data[key] = mock_response + + def test_simple(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +3600) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. + self.assertEqual(self.cache[request], response) + + # Valid entries with an earlier next_update have no effect. + response_1 = self._create_mock_response(-20, +1800) + self.cache[request] = response_1 + self.assertEqual(self.cache[request], response) + + # Invalid entries with a later this_update have no effect. + response_2 = self._create_mock_response(+20, +1800) + self.cache[request] = response_2 + self.assertEqual(self.cache[request], response) + + # Invalid entries with passed next_update have no effect. + response_3 = self._create_mock_response(-10, -5) + self.cache[request] = response_3 + self.assertEqual(self.cache[request], response) + + # Valid entries with a later next_update update the cache. + response_new = self._create_mock_response(-5, +7200) + self.cache[request] = response_new + self.assertEqual(self.cache[request], response_new) + + # Entries with an unset next_update purge the cache. 
+ response_notset = self._create_mock_response(-5, None) + self.cache[request] = response_notset + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_invalidate(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +0.25) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. + self.assertEqual(self.cache[request], response) + + # Wait for entry to become invalid and ensure KeyError is raised. + sleep(0.5) + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_non_existent(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +10) + self._add_mock_cache_entry(request, response) + + # Attempt to retrieve non-existent entry must raise KeyError. + with self.assertRaises(KeyError): + _ = self.cache[self._create_mock_request()] + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py new file mode 100644 index 0000000000..bfd07a83ec --- /dev/null +++ b/test/test_on_demand_csfle.py @@ -0,0 +1,113 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
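The _OCSPCache assertions above pin down a precise replacement policy: a cached response is displaced only by one that is currently valid (this_update in the past, next_update in the future) and that expires later than the cached entry, while a response with no next_update purges the entry. A semantics-only restatement of that rule, a sketch rather than PyMongo's implementation:

    from datetime import datetime, timezone

    class CacheSketch:
        def __init__(self):
            self._data = {}

        def __setitem__(self, key, value):
            if value.next_update is None:
                # An unset next_update invalidates any cached entry.
                self._data.pop(key, None)
                return
            now = datetime.now(tz=timezone.utc).replace(tzinfo=None)
            if not (value.this_update <= now < value.next_update):
                return  # not currently valid: ignore
            cached = self._data.get(key)
            if cached is None or value.next_update > cached.next_update:
                self._data[key] = value  # strictly fresher: accept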
+ +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context + +from bson.codec_options import CodecOptions +from pymongo.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError + + +class TestonDemandGCPCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super().setUpClass() + + def setUp(self): + super().setUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super().setUpClass() + + def setUp(self): + super().setUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/test/test_pooling.py b/test/test_pooling.py index 922deecdcf..e91c57bc6b 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Test built in connection-pooling with threads.""" +from __future__ import annotations import gc import random @@ -21,27 +22,26 @@ import threading import time -from pymongo import MongoClient -from pymongo.errors import (AutoReconnect, - ConnectionFailure, - DuplicateKeyError, - ExceededMaxWaiters) +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.son import SON +from pymongo import MongoClient, message, timeout +from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError +from pymongo.hello import HelloCompat sys.path[0:0] = [""] -from pymongo.network import SocketChecker +from test import IntegrationTest, client_context, unittest +from test.utils import delay, get_pool, joinall, rs_or_single_client + from pymongo.pool import Pool, PoolOptions -from test import client_context, unittest -from test.utils import (get_pool, - joinall, - delay, - rs_or_single_client) +from pymongo.socket_checker import SocketChecker @client_context.require_connection def setUpModule(): pass + N = 10 DB = "pymongo-pooling-tests" @@ -60,8 +60,9 @@ def gc_collect_until_done(threads, timeout=60): class MongoThread(threading.Thread): """A thread that uses a MongoClient.""" + def __init__(self, client): - super(MongoThread, self).__init__() + super().__init__() self.daemon = True # Don't hang whole test if thread hangs. self.client = client self.db = self.client[DB] @@ -100,37 +101,32 @@ def run_mongo_thread(self): raise AssertionError("Should have raised DuplicateKeyError") -class Disconnect(MongoThread): - def run_mongo_thread(self): - for _ in range(N): - self.client.close() - - class SocketGetter(MongoThread): """Utility for TestPooling. Checks out a socket and holds it forever. Used in - test_no_wait_queue_timeout, test_wait_queue_multiple, and - test_no_wait_queue_multiple. + test_no_wait_queue_timeout. """ + def __init__(self, client, pool): - super(SocketGetter, self).__init__(client) - self.state = 'init' + super().__init__(client) + self.state = "init" self.pool = pool self.sock = None def run_mongo_thread(self): - self.state = 'get_socket' + self.state = "get_socket" - # Pass 'checkout' so we can hold the socket. - with self.pool.get_socket({}, checkout=True) as sock: + # Call 'pin_cursor' so we can hold the socket. + with self.pool.checkout() as sock: + sock.pin_cursor() self.sock = sock - self.state = 'sock' + self.state = "connection" def __del__(self): if self.sock: - self.sock.close_socket(None) + self.sock.close_conn(None) def run_cases(client, cases): @@ -138,7 +134,7 @@ def run_cases(client, cases): n_runs = 5 for case in cases: - for i in range(n_runs): + for _i in range(n_runs): t = case(client) t.start() threads.append(t) @@ -150,10 +146,11 @@ def run_cases(client, cases): assert t.passed, "%s.run() threw an exception" % repr(t) -class _TestPoolingBase(unittest.TestCase): +class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" def setUp(self): + super().setUp() self.c = rs_or_single_client() db = self.c[DB] db.unique.drop() @@ -161,70 +158,68 @@ def setUp(self): db.unique.insert_one({"_id": "jesse"}) db.test.insert_many([{} for _ in range(10)]) - def create_pool( - self, - pair=(client_context.host, client_context.port), - *args, - **kwargs): + def tearDown(self): + self.c.close() + super().tearDown() + + def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): # Start the pool with the correct ssl options. 
pool_options = client_context.client._topology_settings.pool_options - kwargs['ssl_context'] = pool_options.ssl_context - kwargs['ssl_match_hostname'] = pool_options.ssl_match_hostname - return Pool(pair, PoolOptions(*args, **kwargs)) + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api + pool = Pool(pair, PoolOptions(*args, **kwargs)) + pool.ready() + return pool class TestPooling(_TestPoolingBase): def test_max_pool_size_validation(self): host, port = client_context.host, client_context.port - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize='foo') + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize="foo") c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False) - self.assertEqual(c.max_pool_size, 100) + self.assertEqual(c.options.pool_options.max_pool_size, 100) def test_no_disconnect(self): run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) - def test_disconnect(self): - run_cases(self.c, [InsertOneAndFind, Disconnect, Unique]) - def test_pool_reuses_open_socket(self): # Test Pool's _check_closed() method doesn't close a healthy socket. cx_pool = self.create_pool(max_pool_size=10) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: + with cx_pool.checkout() as conn: pass - with cx_pool.get_socket({}) as new_sock_info: - self.assertEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) def test_get_socket_and_exception(self): # get_socket() returns socket after a non-network error. cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) with self.assertRaises(ZeroDivisionError): - with cx_pool.get_socket({}) as sock_info: + with cx_pool.checkout() as conn: 1 / 0 # Socket was returned, not closed. - with cx_pool.get_socket({}) as new_sock_info: - self.assertEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) def test_pool_removes_closed_socket(self): # Test that Pool removes explicitly closed socket. cx_pool = self.create_pool() - with cx_pool.get_socket({}) as sock_info: - # Use SocketInfo's API to close the socket. - sock_info.close_socket(None) + with cx_pool.checkout() as conn: + # Use Connection's API to close the socket. + conn.close_conn(None) - self.assertEqual(0, len(cx_pool.sockets)) + self.assertEqual(0, len(cx_pool.conns)) def test_pool_removes_dead_socket(self): # Test that Pool removes dead socket and the socket doesn't return @@ -232,21 +227,20 @@ def test_pool_removes_dead_socket(self): cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: - # Simulate a closed socket without telling the SocketInfo it's + with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's # closed. 
- sock_info.sock.close() - self.assertTrue( - cx_pool.socket_checker.socket_closed(sock_info.sock)) + conn.conn.close() + self.assertTrue(conn.conn_closed()) - with cx_pool.get_socket({}) as new_sock_info: - self.assertEqual(0, len(cx_pool.sockets)) - self.assertNotEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(0, len(cx_pool.conns)) + self.assertNotEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) # Semaphore was released. - with cx_pool.get_socket({}): + with cx_pool.checkout(): pass def test_socket_closed(self): @@ -257,78 +251,88 @@ def test_socket_closed(self): s.close() self.assertTrue(socket_checker.socket_closed(s)) - def test_socket_closed_thread_safe(self): + def test_socket_checker(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((client_context.host, client_context.port)) - self.addCleanup(s.close) socket_checker = SocketChecker() - - def check_socket(): - for _ in range(1000): - self.assertFalse(socket_checker.socket_closed(s)) - - threads = [] - for i in range(3): - thread = threading.Thread(target=check_socket) - thread.start() - threads.append(thread) - - for thread in threads: - thread.join() + # Socket has nothing to read. + self.assertFalse(socket_checker.select(s, read=True)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + # Make the socket readable + _, msg, _ = message._query( + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) + s.sendall(msg) + # Block until the socket is readable. + self.assertTrue(socket_checker.select(s, read=True, timeout=None)) + self.assertTrue(socket_checker.select(s, read=True)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is still writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) def test_return_socket_after_reset(self): pool = self.create_pool() - with pool.get_socket({}) as sock: + with pool.checkout() as sock: + self.assertEqual(pool.active_sockets, 1) + self.assertEqual(pool.operation_count, 1) pool.reset() self.assertTrue(sock.closed) - self.assertEqual(0, len(pool.sockets)) + self.assertEqual(0, len(pool.conns)) + self.assertEqual(pool.active_sockets, 0) + self.assertEqual(pool.operation_count, 0) def test_pool_check(self): # Test that Pool recovers from two connection failures in a row. # This exercises code at the end of Pool._check(). - cx_pool = self.create_pool(max_pool_size=1, - connect_timeout=1, - wait_queue_timeout=1) + cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. 
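
The SocketChecker.select() assertions in test_socket_checker above follow from plain select() semantics: a healthy connected socket is writable immediately but readable only once the peer has sent bytes. A self-contained stdlib sketch, assuming no MongoDB server (socketpair() stands in for the driver's connection):

import select
import socket

a, b = socket.socketpair()
# Freshly connected: nothing to read yet, but immediately writable.
assert not select.select([a], [], [], 0)[0]
assert select.select([], [a], [], 0)[1]

b.sendall(b"ping")  # stand-in for the OP_QUERY bytes sent in the test
assert select.select([a], [], [], 0.05)[0]  # now readable
assert a.recv(4) == b"ping"
a.close()
b.close()
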
self.addCleanup(cx_pool.close) - with cx_pool.get_socket({}) as sock_info: - # Simulate a closed socket without telling the SocketInfo it's + with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's # closed. - sock_info.sock.close() + conn.conn.close() # Swap pool's address with a bad one. - address, cx_pool.address = cx_pool.address, ('foo.com', 1234) + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) with self.assertRaises(AutoReconnect): - with cx_pool.get_socket({}): + with cx_pool.checkout(): pass # Back to normal, semaphore was correctly released. cx_pool.address = address - with cx_pool.get_socket({}, checkout=True) as sock_info: + with cx_pool.checkout(): pass - sock_info.close_socket(None) - def test_wait_queue_timeout(self): wait_queue_timeout = 2 # Seconds - pool = self.create_pool( - max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket({}) as sock_info: + with pool.checkout(): start = time.time() with self.assertRaises(ConnectionFailure): - with pool.get_socket({}): + with pool.checkout(): pass duration = time.time() - start self.assertTrue( abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % ( - duration, wait_queue_timeout)) - + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", + ) def test_no_wait_queue_timeout(self): # Verify checkout() with no wait_queue_timeout blocks forever. @@ -336,52 +340,29 @@ self.addCleanup(pool.close) # Reach max_size. - with pool.get_socket({}) as s1: + with pool.checkout() as s1: t = SocketGetter(self.c, pool) t.start() - while t.state != 'get_socket': + while t.state != "get_socket": time.sleep(0.1) time.sleep(1) - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") - while t.state != 'sock': + while t.state != "connection": time.sleep(0.1) - self.assertEqual(t.state, 'sock') + self.assertEqual(t.state, "connection") self.assertEqual(t.sock, s1) - def test_wait_queue_multiple(self): - wait_queue_multiple = 3 - pool = self.create_pool( - max_pool_size=2, wait_queue_multiple=wait_queue_multiple) - - # Reach max_size sockets. - with pool.get_socket({}): - with pool.get_socket({}): - - # Reach max_size * wait_queue_multiple waiters. - threads = [] - for _ in range(6): - t = SocketGetter(self.c, pool) - t.start() - threads.append(t) - - time.sleep(1) - for t in threads: - self.assertEqual(t.state, 'get_socket') - - with self.assertRaises(ExceededMaxWaiters): - with pool.get_socket({}): - pass - - def test_no_wait_queue_multiple(self): + def test_checkout_more_than_max_pool_size(self): pool = self.create_pool(max_pool_size=2) socks = [] for _ in range(2): - # Pass 'checkout' so we can hold the socket. - with pool.get_socket({}, checkout=True) as sock: + # Call 'pin_cursor' so we can hold the socket.
+ with pool.checkout() as sock: + sock.pin_cursor() socks.append(sock) threads = [] @@ -391,16 +372,131 @@ def test_no_wait_queue_multiple(self): threads.append(t) time.sleep(1) for t in threads: - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") for socket_info in socks: - socket_info.close_socket(None) + socket_info.close_conn(None) + + def test_maxConnecting(self): + client = rs_or_single_client() + self.addCleanup(client.close) + self.client.test.test.insert_one({}) + self.addCleanup(self.client.test.test.delete_many, {}) + pool = get_pool(client) + docs = [] + + # Run 50 short running operations + def find_one(): + docs.append(client.test.test.find_one({})) + + threads = [threading.Thread(target=find_one) for _ in range(50)] + for thread in threads: + thread.start() + for thread in threads: + thread.join(10) + + self.assertEqual(len(docs), 50) + self.assertLessEqual(len(pool.conns), 50) + # TLS and auth make connection establishment more expensive than + # the query which leads to more threads hitting maxConnecting. + # The end result is fewer total connections and better latency. + if client_context.tls and client_context.auth_enabled: + self.assertLessEqual(len(pool.conns), 30) + else: + self.assertLessEqual(len(pool.conns), 50) + # MongoDB 4.4.1 with auth + ssl: + # maxConnecting = 2: 6 connections in ~0.231+ seconds + # maxConnecting = unbounded: 50 connections in ~0.642+ seconds + # + # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: + # maxConnecting = 2: 15-22 connections in ~0.108+ seconds + # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds + print(len(pool.conns)) + + @client_context.require_failCommand_fail_point + def test_csot_timeout_message(self): + client = rs_or_single_client(appName="connectionTimeoutApp") + # Mock a connection failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + client.db.t.insert_one({"x": 1}) + + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + with timeout(0.5): + client.db.t.find_one({"$where": delay(2)}) + + self.assertTrue("(configured timeouts: timeoutMS: 500.0ms" in str(error.exception)) + + @client_context.require_failCommand_fail_point + def test_socket_timeout_message(self): + client = rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") + + # Mock a connection failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + client.db.t.insert_one({"x": 1}) + + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + client.db.t.find_one({"$where": delay(2)}) + + self.assertTrue( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)" + in str(error.exception) + ) + + @client_context.require_failCommand_fail_point + @client_context.require_version_min( + 4, 9, 0 + ) # configureFailPoint does not allow failure on handshake before 4.9, fixed in SERVER-49336 + def test_connection_timeout_message(self): + # Mock a connection failing due to timeout. 
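
The fail_point() context manager these tests lean on wraps the server's configureFailPoint test command. A rough sketch of such a helper, assuming a server started with enableTestCommands=1; this is an illustration, not the test suite's actual implementation:

import contextlib

from pymongo import MongoClient

@contextlib.contextmanager
def fail_point(client: MongoClient, command_args: dict):
    # Arm the fail point (e.g. one of the failCommand documents above).
    client.admin.command(command_args)
    try:
        yield
    finally:
        # Always disarm it, even if the block raised.
        client.admin.command(
            {"configureFailPoint": command_args["configureFailPoint"], "mode": "off"}
        )
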
+ mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "appName": "connectionTimeoutApp", + }, + } + + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + client = rs_or_single_client(connectTimeoutMS=500, appName="connectionTimeoutApp") + client.admin.command("ping") + + self.assertTrue( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)" + in str(error.exception) + ) class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): max_pool_size = 4 c = rs_or_single_client(maxPoolSize=max_pool_size) + self.addCleanup(c.close) collection = c[DB].test # Need one document. @@ -408,7 +504,7 @@ collection.insert_one({}) # nthreads had better be much larger than max_pool_size to ensure that - # max_pool_size sockets are actually required at some point in this + # max_pool_size connections are actually required at some point in this # test's execution. cx_pool = get_pool(c) nthreads = 10 @@ -418,24 +514,25 @@ def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) - assert len(cx_pool.sockets) <= max_pool_size + collection.find_one({"$where": delay(0.1)}) + assert len(cx_pool.conns) <= max_pool_size with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() joinall(threads) self.assertEqual(nthreads, self.n_passed) - self.assertTrue(len(cx_pool.sockets) > 1) - self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter) + self.assertTrue(len(cx_pool.conns) > 1) + self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): c = rs_or_single_client(maxPoolSize=None) + self.addCleanup(c.close) collection = c[DB].test # Need one document. @@ -450,48 +547,48 @@ def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) + collection.find_one({"$where": delay(0.1)}) with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() joinall(threads) self.assertEqual(nthreads, self.n_passed) - self.assertTrue(len(cx_pool.sockets) > 1) + self.assertTrue(len(cx_pool.conns) > 1) + self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): - with self.assertRaises(ValueError): - rs_or_single_client(maxPoolSize=0) + c = rs_or_single_client(maxPoolSize=0) + self.addCleanup(c.close) + pool = get_pool(c) + self.assertEqual(pool.max_pool_size, float("inf")) def test_max_pool_size_with_connection_failure(self): # The pool acquires its semaphore before attempting to connect; ensure # it releases the semaphore on connection failure. test_pool = Pool( - ('somedomainthatdoesntexist.org', 27017), - PoolOptions( - max_pool_size=1, - connect_timeout=1, - socket_timeout=1, - wait_queue_timeout=1)) + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) + test_pool.ready() # First call to checkout() fails; if pool doesn't release its semaphore # then the second call raises "ConnectionFailure: Timed out waiting for # socket from pool" instead of AutoReconnect.
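
A stdlib sketch of the semaphore-leak hazard described in that comment (a toy illustration, not PyMongo's pool internals): the permit has to be released on a failed connect too, or the next checkout stalls and times out:

import threading

permits = threading.BoundedSemaphore(1)

def checkout_with_failing_connect():
    if not permits.acquire(timeout=1):
        raise TimeoutError("Timed out waiting for socket from pool")
    try:
        raise ConnectionError("simulated connect failure")
    finally:
        permits.release()  # drop this line and the second attempt times out

for _ in range(2):
    try:
        checkout_with_failing_connect()
    except ConnectionError:
        pass  # both attempts fail fast; no permit leaks
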
- for i in range(2): + for _i in range(2): with self.assertRaises(AutoReconnect) as context: - with test_pool.get_socket({}, checkout=True): + with test_pool.checkout(): pass # Testing for AutoReconnect instead of ConnectionFailure, above, # is sufficient right *now* to catch a semaphore leak. But that # seems error-prone, so check the message too. - self.assertNotIn('waiting for socket from pool', - str(context.exception)) + self.assertNotIn("waiting for socket from pool", str(context.exception)) if __name__ == "__main__": diff --git a/test/test_pymongo.py b/test/test_pymongo.py index 780a4beb8b..d4203ed5cf 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -13,19 +13,21 @@ # limitations under the License. """Test the pymongo module itself.""" +from __future__ import annotations import sys + sys.path[0:0] = [""] -import pymongo from test import unittest +import pymongo + class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - self.assertEqual(pymongo.MongoClient, - pymongo.mongo_client.MongoClient) + self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) if __name__ == "__main__": diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index d0a394e1c1..38b4dd197a 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -11,46 +11,46 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import datetime +import sys import uuid +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.test_client import IntegrationTest + from bson import decode, encode -from bson.binary import Binary, JAVA_LEGACY +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON -from bson.raw_bson import RawBSONDocument, DEFAULT_RAW_BSON_OPTIONS +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from bson.son import SON -from test import client_context, unittest -from test.test_client import IntegrationTest class TestRawBSONDocument(IntegrationTest): - # {u'_id': ObjectId('556df68b6e32ab21a95e0785'), - # u'name': u'Sherlock', - # u'addresses': [{u'street': u'Baker Street'}]} + # {'_id': ObjectId('556df68b6e32ab21a95e0785'), + # 'name': 'Sherlock', + # 'addresses': [{'street': 'Baker Street'}]} bson_string = ( - b'Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t' - b'\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e' - b'\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00' + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" ) document = RawBSONDocument(bson_string) - @classmethod - def setUpClass(cls): - super(TestRawBSONDocument, cls).setUpClass() - cls.client = client_context.client - def tearDown(self): if client_context.connected: self.client.pymongo_test.test_raw.drop() def test_decode(self): - self.assertEqual('Sherlock', self.document['name']) - first_address = self.document['addresses'][0] + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] self.assertIsInstance(first_address, RawBSONDocument) - self.assertEqual('Baker Street', 
first_address['street']) + self.assertEqual("Baker Street", first_address["street"]) def test_raw(self): self.assertEqual(self.bson_string, self.document.raw) @@ -58,127 +58,148 @@ def test_raw(self): def test_empty_doc(self): doc = RawBSONDocument(encode({})) with self.assertRaises(KeyError): - doc['does-not-exist'] + doc["does-not-exist"] def test_invalid_bson_sequence(self): - bson_byte_sequence = encode({'a': 1})+encode({}) - with self.assertRaisesRegex(InvalidBSON, 'invalid object length'): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): RawBSONDocument(bson_byte_sequence) def test_invalid_bson_eoo(self): - invalid_bson_eoo = encode({'a': 1})[:-1] + b'\x01' - with self.assertRaisesRegex(InvalidBSON, 'bad eoo'): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): RawBSONDocument(invalid_bson_eoo) @client_context.require_connection def test_round_trip(self): db = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)) + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) db.test_raw.insert_one(self.document) - result = db.test_raw.find_one(self.document['_id']) + result = db.test_raw.find_one(self.document["_id"]) + assert result is not None self.assertIsInstance(result, RawBSONDocument) self.assertEqual(dict(self.document.items()), dict(result.items())) @client_context.require_connection def test_round_trip_raw_uuid(self): - coll = self.client.get_database('pymongo_test').test_raw + coll = self.client.get_database("pymongo_test").test_raw uid = uuid.uuid4() - doc = {'_id': 1, - 'bin4': Binary(uid.bytes, 4), - 'bin3': Binary(uid.bytes, 3)} + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} raw = RawBSONDocument(encode(doc)) coll.insert_one(raw) - self.assertEqual(coll.find_one(), {'_id': 1, 'bin4': uid, 'bin3': uid}) + self.assertEqual(coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) # Test that the raw bytes haven't changed. raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) self.assertEqual(raw_coll.find_one(), raw) def test_with_codec_options(self): - # {u'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), - # u'_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} # encoded with JAVA_LEGACY uuid representation. 
bson_string = ( - b'-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02' - b'\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM' - b'\x01\x00\x00\x00' + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" ) document = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) - self.assertEqual(uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff'), - document['_id']) + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) @client_context.require_connection def test_round_trip_codec_options(self): doc = { - 'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), - '_id': uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff') + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), } db = self.client.pymongo_test coll = db.get_collection( - 'test_raw', - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY)) + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) coll.insert_one(doc) - raw_java_legacy = CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument) - coll = db.get_collection('test_raw', codec_options=raw_java_legacy) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) self.assertEqual( - RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), - coll.find_one()) + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), coll.find_one() + ) @client_context.require_connection def test_raw_bson_document_embedded(self): - doc = {'embedded': self.document} + doc = {"embedded": self.document} db = self.client.pymongo_test db.test_raw.insert_one(doc) result = db.test_raw.find_one() - self.assertEqual(decode(self.document.raw), result['embedded']) + assert result is not None + self.assertEqual(decode(self.document.raw), result["embedded"]) # Make sure that CodecOptions are preserved. # {'embedded': [ - # {u'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), - # u'_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} # ]} # encoded with JAVA_LEGACY uuid representation. 
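
For orientation, bytes like the literal below can be produced with the codec APIs this file already imports; the UUID value is the one used throughout these tests, and this is a sketch rather than how the fixture was generated:

import uuid

from bson import encode
from bson.binary import JAVA_LEGACY
from bson.codec_options import CodecOptions

opts = CodecOptions(uuid_representation=JAVA_LEGACY)
uid = uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff")
# JAVA_LEGACY stores the UUID as Binary subtype 3 using Java's legacy byte order.
raw_bytes = encode({"_id": uid}, codec_options=opts)
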
bson_string = ( - b'D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00' - b'\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00' - b'\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00' - b'\x00' + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" ) rbd = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) db.test_raw.drop() db.test_raw.insert_one(rbd) - result = db.get_collection('test_raw', codec_options=CodecOptions( - uuid_representation=JAVA_LEGACY)).find_one() - self.assertEqual(rbd['embedded'][0]['_id'], - result['embedded'][0]['_id']) + result = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() + assert result is not None + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) @client_context.require_connection def test_write_response_raw_bson(self): coll = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)).test_raw + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw # No Exceptions raised while handling write response. coll.insert_one(self.document) coll.delete_one(self.document) coll.insert_many([self.document]) coll.delete_many(self.document) - coll.update_one(self.document, {'$set': {'a': 'b'}}, upsert=True) - coll.update_many(self.document, {'$set': {'b': 'c'}}) + coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + coll.update_many(self.document, {"$set": {"b": "c"}}) def test_preserve_key_ordering(self): - keyvaluepairs = [('a', 1), ('b', 2), ('c', 3),] + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) for rkey, elt in zip(rawdoc, keyvaluepairs): self.assertEqual(rkey, elt[0]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_concern.py b/test/test_read_concern.py index abd69309a9..97855872cf 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -13,136 +13,114 @@ # limitations under the License. 
"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context +from test.utils import OvertCommandListener, rs_or_single_client from bson.son import SON -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern -from test import client_context, PyMongoTestCase -from test.utils import single_client, rs_or_single_client, OvertCommandListener - -class TestReadConcern(PyMongoTestCase): +class TestReadConcern(IntegrationTest): + listener: OvertCommandListener @classmethod @client_context.require_connection def setUpClass(cls): + super().setUpClass() cls.listener = OvertCommandListener() - cls.client = single_client(event_listeners=[cls.listener]) + cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - client_context.client.pymongo_test.create_collection('coll') + client_context.client.pymongo_test.create_collection("coll") @classmethod def tearDownClass(cls): - client_context.client.pymongo_test.drop_collection('coll') + cls.client.close() + client_context.client.pymongo_test.drop_collection("coll") + super().tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() + super().tearDown() def test_read_concern(self): rc = ReadConcern() self.assertIsNone(rc.level) self.assertTrue(rc.ok_for_legacy) - rc = ReadConcern('majority') - self.assertEqual('majority', rc.level) + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) self.assertFalse(rc.ok_for_legacy) - rc = ReadConcern('local') - self.assertEqual('local', rc.level) + rc = ReadConcern("local") + self.assertEqual("local", rc.level) self.assertTrue(rc.ok_for_legacy) self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = 'mongodb://%s/?readConcernLevel=majority' % ( - client_context.pair,) + uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" client = rs_or_single_client(uri, connect=False) - self.assertEqual(ReadConcern('majority'), client.read_concern) + self.assertEqual(ReadConcern("majority"), client.read_concern) - @client_context.require_version_max(3, 1) def test_invalid_read_concern(self): - coll = self.db.get_collection( - 'coll', read_concern=ReadConcern('majority')) - self.assertRaisesRegexp( - ConfigurationError, - 'read concern level of majority is not valid ' - 'with a max wire version of [0-3]', - coll.count) - - @client_context.require_version_min(3, 1, 9, -1) + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): + coll.find_one() + def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.find({'field': 'value'})) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.find({"field": "value"})) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. 
- coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.find({'field': 'value'})) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.find({"field": "value"})) self.assertEqualCommand( - SON([('find', 'coll'), - ('filter', {'field': 'value'}), - ('readConcern', {'level': 'local'})]), - self.listener.results['started'][0].command) + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.started_events[0].command, + ) - @client_context.require_version_min(3, 1, 9, -1) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) - self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) def test_aggregate_out(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}, - {'$out': 'output_collection'}])) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])) # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
if client_context.version >= (4, 1): - self.assertIn('readConcern', - self.listener.results['started'][0].command) + self.assertIn("readConcern", self.listener.started_events[0].command) else: - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) - - def test_map_reduce_out(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - coll.map_reduce('function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out='output_collection') - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) - - if client_context.version.at_least(3, 1, 9, -1): - self.listener.results.clear() - coll.map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out={'inline': 1}) - self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) - - @client_context.require_version_min(3, 1, 9, -1) - def test_inline_map_reduce(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.inline_map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }')) - self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 3a7bd69c89..986785faf6 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -13,65 +13,66 @@ # limitations under the License. """Test the replica_set_connection module.""" +from __future__ import annotations import contextlib import copy import pickle import random import sys -import warnings sys.path[0:0] = [""] -from bson.py3compat import MAXSIZE +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + OvertCommandListener, + connected, + one, + rs_client, + single_client, + wait_until, +) +from test.version import Version + from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import (ReadPreference, MovingAverage, - Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, - Nearest) +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) from pymongo.server_description import ServerDescription -from pymongo.server_selectors import readable_server_selector, Selection +from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern -from test import (SkipTest, - client_context, - unittest, - db_user, - db_pwd) -from test.test_replica_set_client import TestReplicaSetClientBase -from test.utils import (connected, - ignore_deprecations, - one, - rs_client, - single_client, - wait_until) -from test.version import Version - - -class TestSelections(unittest.TestCase): +class TestSelections(IntegrationTest): @client_context.require_connection def test_bool(self): client = single_client() wait_until(lambda: client.address, "discover primary") - selection = Selection.from_topology_description( - client._topology.description) + selection = 
Selection.from_topology_description(client._topology.description) self.assertTrue(selection) self.assertFalse(selection.with_server_descriptions([])) class TestReadPreferenceObjects(unittest.TestCase): - prefs = [Primary(), - PrimaryPreferred(), - Secondary(), - Nearest(tag_sets=[{'a': 1}, {'b': 2}]), - SecondaryPreferred(max_staleness=30)] + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] def test_pickle(self): for pref in self.prefs: @@ -86,59 +87,53 @@ def test_deepcopy(self): self.assertEqual(pref, copy.deepcopy(pref)) -class TestReadPreferencesBase(TestReplicaSetClientBase): - +class TestReadPreferencesBase(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestReadPreferencesBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestReadPreferencesBase, self).setUp() + super().setUp() # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( - "pymongo_test", - write_concern=WriteConcern(w=self.w)).test.insert_many( - [{'_id': i} for i in range(10)]) + "pymongo_test", write_concern=WriteConcern(w=client_context.w) + ).test.insert_many([{"_id": i} for i in range(10)]) self.addCleanup(self.client.pymongo_test.test.drop) def read_from_which_host(self, client): - """Do a find() on the client and return which host was used - """ + """Do a find() on the client and return which host was used""" cursor = client.pymongo_test.test.find() next(cursor) return cursor.address def read_from_which_kind(self, client): """Do a find() on the client and return 'primary' or 'secondary' - depending on which the client used. + depending on which the client used. """ address = self.read_from_which_host(client) if address == client.primary: - return 'primary' + return "primary" elif address in client.secondaries: - return 'secondary' + return "secondary" else: self.fail( - 'Cursor used address %s, expected either primary ' - '%s or secondaries %s' % ( - address, client.primary, client.secondaries)) + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" + ) + return None def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) - wait_until( - lambda: len(c.nodes - c.arbiters) == self.w, - "discovered all nodes") + wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, 'Cursor used %s, expected %s' % ( - used, expected)) - + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") -class TestSingleSlaveOk(TestReadPreferencesBase): +class TestSingleSecondaryOk(TestReadPreferencesBase): def test_reads_from_secondary(self): host, port = next(iter(self.client.secondaries)) @@ -159,7 +154,6 @@ def test_reads_from_secondary(self): self.assertEqual(10, len(list(coll.find()))) # Test some database helpers. - self.assertIsNotNone(db.collection_names()) self.assertIsNotNone(db.list_collection_names()) self.assertIsNotNone(db.validate_collection("test")) self.assertIsNotNone(db.command("ping")) @@ -170,85 +164,66 @@ def test_reads_from_secondary(self): self.assertIsNotNone(coll.aggregate([])) self.assertIsNotNone(coll.index_information()) - # Test some "magic" namespace helpers. 
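
The tests above route reads through a client pointed straight at one secondary. In application code the same routing is usually expressed per collection; a minimal sketch (the URI, database, and collection names here are hypothetical):

from pymongo import MongoClient, ReadPreference

client = MongoClient("mongodb://localhost:27017/?replicaSet=repl0")  # hypothetical
coll = client.pymongo_test.test.with_options(
    read_preference=ReadPreference.SECONDARY_PREFERRED
)
doc = coll.find_one()  # served by a secondary when one is available
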
- self.assertIsNotNone(db.current_op()) - class TestReadPreferences(TestReadPreferencesBase): - def test_mode_validation(self): - for mode in (ReadPreference.PRIMARY, - ReadPreference.PRIMARY_PREFERRED, - ReadPreference.SECONDARY, - ReadPreference.SECONDARY_PREFERRED, - ReadPreference.NEAREST): - self.assertEqual( - mode, - rs_client(read_preference=mode).read_preference) - - self.assertRaises( - TypeError, - rs_client, read_preference='foo') + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual(mode, rs_client(read_preference=mode).read_preference) + + self.assertRaises(TypeError, rs_client, read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual( - [{}], - rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}]) - self.assertEqual( - [{'k': 'v'}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}, {}]) - self.assertEqual( - [{'k': 'v'}, {}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets) self.assertRaises(ValueError, Secondary, tag_sets=[]) # One dict not ok, must be a list of dicts - self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'}) + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) - self.assertRaises(TypeError, Secondary, tag_sets='foo') + self.assertRaises(TypeError, Secondary, tag_sets="foo") - self.assertRaises(TypeError, Secondary, tag_sets=['foo']) + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) def test_threshold_validation(self): - self.assertEqual(17, rs_client( - localThresholdMS=17 - ).local_threshold_ms) + self.assertEqual( + 17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + ) - self.assertEqual(42, rs_client( - localThresholdMS=42 - ).local_threshold_ms) + self.assertEqual( + 42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + ) - self.assertEqual(666, rs_client( - localthresholdms=666 - ).local_threshold_ms) + self.assertEqual( + 666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + ) - self.assertEqual(0, rs_client( - localthresholdms=0 - ).local_threshold_ms) + self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms) - self.assertRaises(ValueError, - rs_client, - localthresholdms=-1) + self.assertRaises(ValueError, rs_client, localthresholdms=-1) def test_zero_latency(self): - ping_times = set() + ping_times: set = set() # Generate unique ping times. 
while len(ping_times) < len(self.client.nodes): ping_times.add(random.random()) for ping_time, host in zip(ping_times, self.client.nodes): ServerDescription._host_to_round_trip_time[host] = ping_time try: - client = connected( - rs_client(readPreference='nearest', localThresholdMS=0)) - wait_until( - lambda: client.nodes == self.client.nodes, - "discovered all nodes") + client = connected(rs_client(readPreference="nearest", localThresholdMS=0)) + wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") host = self.read_from_which_host(client) for _ in range(5): self.assertEqual(host, self.read_from_which_host(client)) @@ -256,41 +231,33 @@ def test_zero_latency(self): ServerDescription._host_to_round_trip_time.clear() def test_primary(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises( - ConfigurationError, - rs_client, tag_sets=[{'dc': 'ny'}]) + self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY_PREFERRED) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) def test_secondary(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) def test_secondary_preferred(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) def test_nearest(self): # With high localThresholdMS, expect to read from any # member - c = rs_client( - read_preference=ReadPreference.NEAREST, - localThresholdMS=10000) # 10 seconds + c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds - data_members = set(self.hosts).difference(set(self.arbiters)) + data_members = {self.client.primary} | self.client.secondaries # This is a probabilistic test; track which members we've read from so # far, and keep reading until we've used all the members or give up. # Chance of using only 2 of 3 members 10k times if there's no bug = # 3 * (2/3)**10000, very low. 
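
Checking that arithmetic: (2/3)**10000 underflows an IEEE double straight to 0.0, so the claim is easiest to verify in log space:

import math

# P(10,000 NEAREST reads all avoid one fixed member) = 3 * (2/3)**10000.
log10_p = math.log10(3) + 10000 * math.log10(2 / 3)
assert log10_p < -1700  # i.e. p < 10**-1700: effectively impossible
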
- used = set() + used: set = set() i = 0 while data_members.difference(used) and i < 10000: address = self.read_from_which_host(c) @@ -298,77 +265,77 @@ def test_nearest(self): i += 1 not_used = data_members.difference(used) - latencies = ', '.join( - '%s: %dms' % (server.description.address, - server.description.round_trip_time) - for server in c._get_topology().select_servers( - readable_server_selector)) + latencies = ", ".join( + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in c._get_topology().select_servers(readable_server_selector) + ) self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies)) + f" but didn't use {not_used}\nlatencies: {latencies}", + ) class ReadPrefTester(MongoClient): def __init__(self, *args, **kwargs): self.has_read_from = set() - client_options = client_context.default_client_options.copy() + client_options = client_context.client_options client_options.update(kwargs) - super(ReadPrefTester, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) @contextlib.contextmanager - def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads( - read_preference, session) - with context as (sock_info, slave_ok): - self.record_a_read(sock_info.address) - yield sock_info, slave_ok + def _conn_for_reads(self, read_preference, session): + context = super()._conn_for_reads(read_preference, session) + with context as (conn, read_preference): + self.record_a_read(conn.address) + yield conn, read_preference @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, - exhaust=False): - context = super(ReadPrefTester, self)._slaveok_for_server( - read_preference, server, session, exhaust=exhaust) - with context as (sock_info, slave_ok): - self.record_a_read(sock_info.address) - yield sock_info, slave_ok + def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + with context as (conn, read_preference): + self.record_a_read(conn.address) + yield conn, read_preference def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, 0) self.has_read_from.add(server) + _PREF_MAP = [ (Primary, SERVER_TYPE.RSPrimary), (PrimaryPreferred, SERVER_TYPE.RSPrimary), (Secondary, SERVER_TYPE.RSSecondary), (SecondaryPreferred, SERVER_TYPE.RSSecondary), - (Nearest, 'any') + (Nearest, "any"), ] -class TestCommandAndReadPreference(TestReplicaSetClientBase): +class TestCommandAndReadPreference(IntegrationTest): + c: ReadPrefTester + client_version: Version @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestCommandAndReadPreference, cls).setUpClass() + super().setUpClass() cls.c = ReadPrefTester( client_context.pair, - replicaSet=cls.name, # Ignore round trip times, to test ReadPreference modes only. - localThresholdMS=1000*1000) - if client_context.auth_enabled: - cls.c.admin.authenticate(db_user, db_pwd) + localThresholdMS=1000 * 1000, + ) cls.client_version = Version.from_client(cls.c) - # mapReduce and group fail with no collection + # mapReduce fails if the collection does not exist. 
coll = cls.c.pymongo_test.get_collection( - 'test', write_concern=WriteConcern(w=cls.w)) + "test", write_concern=WriteConcern(w=client_context.w) + ) coll.insert_one({}) @classmethod def tearDownClass(cls): - cls.c.drop_database('pymongo_test') + cls.c.drop_database("pymongo_test") + cls.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): """Execute fn(*args, **kwargs) and return the Server instance used.""" @@ -379,12 +346,13 @@ def executed_on_which_server(self, client, fn, *args, **kwargs): def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): server = self.executed_on_which_server(client, fn, *args, **kwargs) - self.assertEqual(SERVER_TYPE._fields[server_type], - SERVER_TYPE._fields[server.description.server_type]) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) def _test_fn(self, server_type, fn): for _ in range(10): - if server_type == 'any': + if server_type == "any": used = set() for _ in range(1000): server = self.executed_on_which_server(self.c, fn) @@ -393,13 +361,10 @@ def _test_fn(self, server_type, fn): # Success break - unused = self.c.secondaries.union( - set([self.c.primary]) - ).difference(used) + assert self.c.primary is not None + unused = self.c.secondaries.union({self.c.primary}).difference(used) if unused: - self.fail( - "Some members not used for NEAREST: %s" % ( - unused)) + self.fail("Some members not used for NEAREST: %s" % (unused)) else: self.assertExecutedOn(server_type, self.c, fn) @@ -410,7 +375,10 @@ def _test_primary_helper(self, func): def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): for mode, server_type in _PREF_MAP: new_coll = coll.with_options(read_preference=mode()) - func = lambda: getattr(new_coll, meth)(*args, **kwargs) + + def func(): + return getattr(new_coll, meth)(*args, **kwargs) + if secondary_ok: self._test_fn(server_type, func) else: @@ -420,8 +388,10 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command('dbStats', - read_preference=mode()) + + def func(): + return self.c.pymongo_test.command("dbStats", read_preference=mode()) + self._test_fn(server_type, func) def test_create_collection(self): @@ -429,49 +399,33 @@ def test_create_collection(self): # the collection already exists. 
self._test_primary_helper( lambda: self.c.pymongo_test.create_collection( - 'some_collection%s' % random.randint(0, MAXSIZE))) - - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - self._test_coll_helper(True, self.c.pymongo_test.test, 'group', - {'a': 1}, {}, {}, 'function() { }') - - def test_map_reduce(self): - self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce', - 'function() { }', 'function() { }', - {'inline': 1}) - - def test_inline_map_reduce(self): - self._test_coll_helper(True, self.c.pymongo_test.test, - 'inline_map_reduce', - 'function() { }', 'function() { }') - - @ignore_deprecations - def test_count(self): - self._test_coll_helper(True, self.c.pymongo_test.test, 'count') + "some_collection%s" % random.randint(0, sys.maxsize) + ) + ) def test_count_documents(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'count_documents', {}) + self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) def test_estimated_document_count(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'estimated_document_count') + self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") def test_distinct(self): - self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a') + self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") def test_aggregate(self): - self._test_coll_helper(True, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}]) + self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) def test_aggregate_write(self): - self._test_coll_helper(False, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}, {'$out': "agg_write_test"}]) + # 5.0 servers support $out on secondaries. 
+ secondary_ok = client_context.version.at_least(5, 0) + self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) class TestMovingAverage(unittest.TestCase): @@ -479,87 +433,60 @@ def test_moving_average(self): avg = MovingAverage() self.assertIsNone(avg.get()) avg.add_sample(10) - self.assertAlmostEqual(10, avg.get()) + self.assertAlmostEqual(10, avg.get()) # type: ignore avg.add_sample(20) - self.assertAlmostEqual(12, avg.get()) + self.assertAlmostEqual(12, avg.get()) # type: ignore avg.add_sample(30) - self.assertAlmostEqual(15.6, avg.get()) + self.assertAlmostEqual(15.6, avg.get()) # type: ignore -class TestMongosAndReadPreference(unittest.TestCase): +class TestMongosAndReadPreference(IntegrationTest): def test_read_preference_document(self): pref = Primary() - self.assertEqual( - pref.document, - {'mode': 'primary'}) + self.assertEqual(pref.document, {"mode": "primary"}) pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( pref.document, - {'mode': 'primaryPreferred'}) - pref = PrimaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = PrimaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'secondary'}) - pref = Secondary(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondary', 'tags': [{'dc': 'sf'}]}) - pref = Secondary( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondary', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( pref.document, - {'mode': 'secondaryPreferred'}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = SecondaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], 
max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'nearest'}) - pref = Nearest(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'nearest', 'tags': [{'dc': 'sf'}]}) - pref = Nearest( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'nearest', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) with self.assertRaises(TypeError): - Nearest(max_staleness=1.5) # Float is prohibited. + # Float is prohibited. + Nearest(max_staleness=1.5) # type: ignore with self.assertRaises(ValueError): Nearest(max_staleness=0) @@ -567,6 +494,70 @@ def test_read_preference_document(self): with self.assertRaises(ValueError): Nearest(max_staleness=-2) + def test_read_preference_document_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) # type: ignore + + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. + self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + def test_send_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + if client_context.supports_secondary_read_pref: + cases["secondary"] = Secondary + listener = OvertCommandListener() + client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) + client.admin.command("ping") + for _mode, cls in cases.items(): + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) + listener.reset() + coll.find_one() + started = listener.started_events + self.assertEqual(len(started), 1, started) + cmd = started[0].command + if client_context.is_rs or client_context.is_mongos: + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) + else: + self.assertNotIn("$readPreference", cmd) + def test_maybe_add_read_preference(self): # Primary doesn't add $readPreference @@ -575,70 +566,74 @@ def test_maybe_add_read_preference(self): pref = PrimaryPreferred() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = 
PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Secondary() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Secondary(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) # SecondaryPreferred without tag_sets or max_staleness doesn't add # $readPreference pref = SecondaryPreferred() out = _maybe_add_read_preference({}, pref) self.assertEqual(out, {}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}]) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = SecondaryPreferred(max_staleness=120) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Nearest() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) pref = Nearest() out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) @client_context.require_mongos def test_mongos(self): - shard = client_context.client.config.shards.find_one()['host'] - num_members = shard.count(',') + 1 + res = client_context.client.config.shards.find_one() + assert res is not None + shard = res["host"] + num_members = shard.count(",") + 1 if num_members == 1: raise SkipTest("Need a replica set shard to test.") coll = client_context.client.pymongo_test.get_collection( - "test", - write_concern=WriteConcern(w=num_members)) + "test", write_concern=WriteConcern(w=num_members) + ) coll.drop() res = coll.insert_many([{} for _ in range(5)]) first_id = res.inserted_ids[0] @@ -646,11 +641,7 @@ def test_mongos(self): # Note - this isn't a perfect test since there's no way to # tell what 
shard member a query ran on. - for pref in (Primary(), - PrimaryPreferred(), - Secondary(), - SecondaryPreferred(), - Nearest()): + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): qcoll = coll.with_options(read_preference=pref) results = list(qcoll.find().sort([("_id", 1)])) self.assertEqual(first_id, results[0]["_id"]) @@ -660,16 +651,17 @@ def test_mongos(self): self.assertEqual(last_id, results[0]["_id"]) @client_context.require_mongos - @client_context.require_version_min(3, 3, 12) def test_mongos_max_staleness(self): # Sanity check that we're sending maxStalenessSeconds coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=120)) + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) # No error coll.find_one() coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=10)) + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) try: coll.find_one() except OperationFailure as exc: @@ -678,14 +670,14 @@ def test_mongos_max_staleness(self): self.fail("mongos accepted invalid staleness") coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=120).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ).pymongo_test.test # No error coll.find_one() coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=10).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ).pymongo_test.test try: coll.find_one() except OperationFailure as exc: @@ -693,5 +685,6 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 568eb5c951..939f05faf2 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -13,6 +13,7 @@ # limitations under the License. 
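Aside on the maxStalenessSeconds assertions above: the staleness bound can be set either on a collection's read preference or in the connection string, and the server (not the driver) enforces the 90-second floor, which is why the tests expect an OperationFailure for a value of 10. A minimal sketch of both forms, assuming a hypothetical mongos at localhost:27017:

from pymongo import MongoClient
from pymongo.errors import OperationFailure
from pymongo.read_preferences import SecondaryPreferred

client = MongoClient("mongodb://localhost:27017")  # hypothetical mongos address
coll = client.pymongo_test.get_collection(
    "test", read_preference=SecondaryPreferred(max_staleness=120)
)
coll.find_one()  # accepted: 120 seconds satisfies the server's minimum

# Equivalent connection-string form, here with a value the server rejects:
client = MongoClient(
    "mongodb://localhost:27017/?readPreference=secondaryPreferred&maxStalenessSeconds=10"
)
try:
    client.pymongo_test.test.find_one()
except OperationFailure:
    pass  # the server refuses maxStalenessSeconds below 90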
"""Run the read and write concern tests.""" +from __future__ import annotations import json import os @@ -21,34 +22,38 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + EventListener, + SpecTestCreator, + disable_replication, + enable_replication, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner + from pymongo import DESCENDING -from pymongo.errors import (BulkWriteError, - ConfigurationError, - WTimeoutError, - WriteConcernError) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import client_context, unittest -from test.utils import (IMPOSSIBLE_WRITE_CONCERN, - EventListener, - disable_replication, - enable_replication, - rs_or_single_client) - - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'read_write_concern') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") -class TestReadWriteConcernSpec(unittest.TestCase): - @client_context.require_connection +class TestReadWriteConcernSpec(IntegrationTest): def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) @@ -59,83 +64,87 @@ def test_omit_default_read_write_concern(self): def rename_and_drop(): # Ensure collection exists. 
collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_command_default_write_concern(): collection.database.command( - 'insert', 'collection', documents=[{}], - write_concern=WriteConcern()) + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) ops = [ - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('rename_and_drop', rename_and_drop), - ('command', insert_command_default_write_concern) + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), ] for name, f in ops: - listener.results.clear() + listener.reset() f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): self.assertNotIn( - 'readConcern', event.command, - "%s sent default readConcern with %s" % ( - name, event.command_name)) + "readConcern", + event.command, + f"{name} sent default readConcern with {event.command_name}", + ) self.assertNotIn( - 'writeConcern', event.command, - "%s sent default writeConcern with %s" % ( - name, event.command_name)) + "writeConcern", + event.command, + f"{name} sent default writeConcern with {event.command_name}", + ) def assertWriteOpsRaise(self, write_concern, expected_exception): - client = rs_or_single_client(**write_concern.document) - db = client.get_database('pymongo_test') + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = rs_or_single_client(w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000) + db = client.get_database("pymongo_test") coll = db.test def insert_command(): coll.database.command( - 'insert', 'new_collection', documents=[{}], + "insert", + "new_collection", + documents=[{}], writeConcern=write_concern.document, - parse_write_concern_error=True) + parse_write_concern_error=True, + ) ops = [ - ('insert_one', lambda: coll.insert_one({})), - ('insert_many', lambda: coll.insert_many([{}, {}])), - ('update_one', lambda: coll.update_one({}, {'$set': {'x': 1}})), - ('update_many', lambda: coll.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: coll.delete_one({})), - ('delete_many', lambda: coll.delete_many({})), - ('bulk_write', lambda: coll.bulk_write([InsertOne({})])), - ('command', insert_command), + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: 
coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), + # SERVER-46668 Delete all the documents in the collection to + # work around a hang in createIndexes. + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), ] - ops_require_34 = [ - ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), - ('create_index', lambda: coll.create_index([('a', DESCENDING)])), - ('create_indexes', lambda: coll.create_indexes([IndexModel('b')])), - ('drop_index', lambda: coll.drop_index([('a', DESCENDING)])), - ('create', lambda: db.create_collection('new')), - ('rename', lambda: coll.rename('new')), - ('drop', lambda: db.new.drop()), - ] - if client_context.version > (3, 4): - ops.extend(ops_require_34) - # SERVER-34776: Drop database does not respect wtimeout in 4.0. - if client_context.version <= (3, 6): - ops.append(('drop_database', lambda: client.drop_database(db))) + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if client_context.version[:2] != (3, 6): + ops.append(("drop_database", lambda: client.drop_database(db))) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. - if name in ('insert_many', 'bulk_write'): + if name in ("insert_many", "bulk_write"): expected = BulkWriteError else: expected = expected_exception @@ -143,102 +152,139 @@ def insert_command(): f() if expected == BulkWriteError: bulk_result = cm.exception.details - wc_errors = bulk_result['writeConcernErrors'] + assert bulk_result is not None + wc_errors = bulk_result["writeConcernErrors"] self.assertTrue(wc_errors) @client_context.require_replica_set def test_raise_write_concern_error(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") + assert client_context.w is not None self.assertWriteOpsRaise( - WriteConcern(w=client_context.w+1, wtimeout=1), WriteConcernError) + WriteConcern(w=client_context.w + 1, wtimeout=1), WriteConcernError + ) - # MongoDB 3.2 introduced the stopReplProducer failpoint. - @client_context.require_version_min(3, 2) @client_context.require_secondaries_count(1) @client_context.require_test_commands def test_raise_wtimeout(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") self.addCleanup(enable_replication, client_context.client) # Disable replication to guarantee a wtimeout error.
disable_replication(client_context.client) - self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), - WTimeoutError) + self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) + + @client_context.require_failCommand_fail_point + def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. + with self.assertRaises(BulkWriteError) as ctx: + self.db.test.bulk_write([InsertOne({})]) + expected_details = { + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + self.assertEqual(ctx.exception.details, expected_details) + + @client_context.require_version_min(4, 9) + def test_write_error_details_exposes_errinfo(self): + listener = EventListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + db = client.errinfotest + self.addCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + db.test.insert_one({"x": 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + assert ctx.exception.details is not None + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.succeeded_events: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") def normalize_write_concern(concern): result = {} for key in concern: - if key.lower() == 'wtimeoutms': - result['wtimeout'] = concern[key] - elif key == 'journal': - result['j'] = concern[key] + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] else: result[key] = concern[key] return result def create_connection_string_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - warning = test_case['warning'] + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] if not valid: if warning is False: - self.assertRaises( - (ConfigurationError, ValueError), - MongoClient, - uri, - connect=False) + self.assertRaises((ConfigurationError, ValueError), MongoClient, uri, connect=False) else: with warnings.catch_warnings(): - warnings.simplefilter('error', UserWarning) - self.assertRaises( - UserWarning, - MongoClient, - uri, - connect=False) + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) - if 'writeConcern' in test_case: + if "writeConcern" in test_case: document = client.write_concern.document - self.assertEqual( - document, - normalize_write_concern(test_case['writeConcern'])) - if 'readConcern' in test_case: + 
self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: document = client.read_concern.document - self.assertEqual(document, test_case['readConcern']) + self.assertEqual(document, test_case["readConcern"]) return run_test def create_document_test(test_case): - def run_test(self): - valid = test_case['valid'] + valid = test_case["valid"] - if 'writeConcern' in test_case: - normalized = normalize_write_concern(test_case['writeConcern']) + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) if not valid: - self.assertRaises( - (ConfigurationError, ValueError), - WriteConcern, - **normalized) + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) else: - concern = WriteConcern(**normalized) - self.assertEqual( - concern.document, test_case['writeConcernDocument']) - self.assertEqual( - concern.acknowledged, test_case['isAcknowledged']) - self.assertEqual( - concern.is_server_default, test_case['isServerDefault']) - if 'readConcern' in test_case: - # Any string for 'level' is equaly valid - concern = ReadConcern(**test_case['readConcern']) - self.assertEqual(concern.document, test_case['readConcernDocument']) - self.assertEqual( - not bool(concern.level), test_case['isServerDefault']) + write_concern = WriteConcern(**normalized) + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: + # Any string for 'level' is equally valid + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) return run_test @@ -247,22 +293,26 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): dirname = os.path.split(dirpath)[-1] - if dirname == 'connection-string': + if dirname == "operation": + # This directory is tested by TestOperations. + continue + elif dirname == "connection-string": create_test = create_connection_string_test else: create_test = create_document_test for filename in filenames: with open(os.path.join(dirpath, filename)) as test_stream: - test_cases = json.load(test_stream)['tests'] + test_cases = json.load(test_stream)["tests"] fname = os.path.splitext(filename)[0] for test_case in test_cases: new_test = create_test(test_case) - test_name = 'test_%s_%s_%s' % ( - dirname.replace('-', '_'), - fname.replace('-', '_'), - str(test_case['description'].lower().replace(' ', '_'))) + test_name = "test_{}_{}_{}".format( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) @@ -271,5 +321,26 @@ def create_tests(): create_tests() -if __name__ == '__main__': +class TestOperation(SpecRunner): + # Location of JSON test specifications. 
+ TEST_PATH = os.path.join(_TEST_PATH, "operation") + + def get_outcome_coll_name(self, outcome, collection): + """Spec says outcome has an optional 'collection.name'.""" + return outcome["collection"].get("name", collection.name) + + +def create_operation_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = SpecTestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": unittest.main() diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py deleted file mode 100644 index 7c8d216b8d..0000000000 --- a/test/test_replica_set_client.py +++ /dev/null @@ -1,373 +0,0 @@ -# Copyright 2011-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the mongo_replica_set_client module.""" - -import sys -import warnings -import time - -sys.path[0:0] = [""] - -from bson.codec_options import CodecOptions -from bson.son import SON -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, partition_node -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - NetworkTimeout, - NotMasterError, - OperationFailure) -from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.read_preferences import ReadPreference, Secondary, Nearest -from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - unittest, - SkipTest, - db_pwd, - db_user, - MockClientTest, - HAVE_IPADDRESS) -from test.pymongo_mocks import MockClient -from test.utils import (connected, - delay, - ignore_deprecations, - one, - rs_client, - single_client, - wait_until) - - -class TestReplicaSetClientBase(IntegrationTest): - - @classmethod - @client_context.require_replica_set - def setUpClass(cls): - super(TestReplicaSetClientBase, cls).setUpClass() - cls.name = client_context.replica_set_name - cls.w = client_context.w - - ismaster = client_context.ismaster - cls.hosts = set(partition_node(h.lower()) for h in ismaster['hosts']) - cls.arbiters = set(partition_node(h) - for h in ismaster.get("arbiters", [])) - - repl_set_status = client_context.client.admin.command( - 'replSetGetStatus') - primary_info = [ - m for m in repl_set_status['members'] - if m['stateStr'] == 'PRIMARY' - ][0] - - cls.primary = partition_node(primary_info['name'].lower()) - cls.secondaries = set( - partition_node(m['name'].lower()) - for m in repl_set_status['members'] - if m['stateStr'] == 'SECONDARY') - - -class TestReplicaSetClient(TestReplicaSetClientBase): - def test_deprecated(self): - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with self.assertRaises(DeprecationWarning): - MongoReplicaSetClient() - - def test_connect(self): - client = MongoClient( - client_context.pair, - replicaSet='fdlksjfdslkjfd', - serverSelectionTimeoutMS=100) 
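A note on the test_connect case being deleted here: a replicaSet name that matches no running deployment means no server is ever selectable, so the first operation fails once serverSelectionTimeoutMS elapses, as the assertion that follows shows. A minimal sketch of that behavior, assuming a hypothetical standalone on localhost (ServerSelectionTimeoutError is a ConnectionFailure subclass, which is what the old assertion caught):

from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError

client = MongoClient(
    "localhost:27017",             # hypothetical host
    replicaSet="bogus-set-name",   # matches no deployment
    serverSelectionTimeoutMS=100,  # fail fast instead of the 30s default
)
try:
    client.test.test.find_one()
except ServerSelectionTimeoutError as exc:
    print("server selection failed:", exc)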
- - with self.assertRaises(ConnectionFailure): - client.test.test.find_one() - - def test_repr(self): - with ignore_deprecations(): - client = MongoReplicaSetClient( - client_context.host, - client_context.port, - replicaSet=self.name) - - self.assertIn("MongoReplicaSetClient(host=[", repr(client)) - self.assertIn(client_context.pair, repr(client)) - - def test_properties(self): - c = client_context.client - c.admin.command('ping') - - wait_until(lambda: c.primary == self.primary, "discover primary") - wait_until(lambda: c.arbiters == self.arbiters, "discover arbiters") - wait_until(lambda: c.secondaries == self.secondaries, - "discover secondaries") - - self.assertEqual(c.primary, self.primary) - self.assertEqual(c.secondaries, self.secondaries) - self.assertEqual(c.arbiters, self.arbiters) - self.assertEqual(c.max_pool_size, 100) - - # Make sure MongoClient's properties are copied to Database and - # Collection. - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.codec_options, CodecOptions()) - self.assertEqual(obj.read_preference, ReadPreference.PRIMARY) - self.assertEqual(obj.write_concern, WriteConcern()) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - ReadPreference.PRIMARY, cursor._read_preference()) - - tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}] - secondary = Secondary(tag_sets=tag_sets) - c = rs_client( - maxPoolSize=25, - document_class=SON, - tz_aware=True, - read_preference=secondary, - localThresholdMS=77, - j=True) - - self.assertEqual(c.max_pool_size, 25) - - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.codec_options, CodecOptions(SON, True)) - self.assertEqual(obj.read_preference, secondary) - self.assertEqual(obj.write_concern, WriteConcern(j=True)) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - secondary, cursor._read_preference()) - - nearest = Nearest(tag_sets=[{'dc': 'ny'}, {}]) - cursor = c.pymongo_test.get_collection( - "test", read_preference=nearest).find() - - self.assertEqual(nearest, cursor._read_preference()) - self.assertEqual(c.max_bson_size, 16777216) - c.close() - - @client_context.require_secondaries_count(1) - def test_timeout_does_not_mark_member_down(self): - # If a query times out, the client shouldn't mark the member "down". - - # Disable background refresh. - with client_knobs(heartbeat_frequency=999999): - c = rs_client(socketTimeoutMS=3000, w=self.w) - collection = c.pymongo_test.test - collection.insert_one({}) - - # Query the primary. - self.assertRaises( - NetworkTimeout, - collection.find_one, - {'$where': delay(5)}) - - self.assertTrue(c.primary) - collection.find_one() # No error. - - coll = collection.with_options( - read_preference=ReadPreference.SECONDARY) - - # Query the secondary. - self.assertRaises( - NetworkTimeout, - coll.find_one, - {'$where': delay(5)}) - - self.assertTrue(c.secondaries) - - # No error. - coll.find_one() - - @client_context.require_ipv6 - def test_ipv6(self): - if client_context.ssl: - if not HAVE_IPADDRESS: - raise SkipTest("Need the ipaddress module to test with SSL") - - port = client_context.port - c = rs_client("mongodb://[::1]:%d" % (port,)) - - # Client switches to IPv4 once it has first ismaster response. - msg = 'discovered primary with IPv4 address "%r"' % (self.primary,) - wait_until(lambda: c.primary == self.primary, msg) - - # Same outcome with both IPv4 and IPv6 seeds. 
- c = rs_client("mongodb://[::1]:%d,localhost:%d" % (port, port)) - - wait_until(lambda: c.primary == self.primary, msg) - - if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) - else: - auth_str = "" - - uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port) - client = rs_client(uri) - client.pymongo_test.test.insert_one({"dummy": u"object"}) - client.pymongo_test_bernie.test.insert_one({"dummy": u"object"}) - - dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) - client.close() - - def _test_kill_cursor_explicit(self, read_pref): - with client_knobs(kill_cursor_frequency=0.01): - c = rs_client(read_preference=read_pref, w=self.w) - db = c.pymongo_test - db.drop_collection("test") - - test = db.test - test.insert_many([{"i": i} for i in range(20)]) - - # Partially evaluate cursor so it's left alive, then kill it - cursor = test.find().batch_size(10) - next(cursor) - self.assertNotEqual(0, cursor.cursor_id) - - if read_pref == ReadPreference.PRIMARY: - msg = "Expected cursor's address to be %s, got %s" % ( - c.primary, cursor.address) - - self.assertEqual(cursor.address, c.primary, msg) - else: - self.assertNotEqual( - cursor.address, c.primary, - "Expected cursor's address not to be primary") - - cursor_id = cursor.cursor_id - - # Cursor dead on server - trigger a getMore on the same cursor_id - # and check that the server returns an error. - cursor2 = cursor.clone() - cursor2._Cursor__id = cursor_id - - if sys.platform.startswith('java') or 'PyPy' in sys.version: - # Explicitly kill cursor. - cursor.close() - else: - # Implicitly kill it in CPython. - del cursor - - time.sleep(5) - self.assertRaises(OperationFailure, lambda: list(cursor2)) - - def test_kill_cursor_explicit_primary(self): - self._test_kill_cursor_explicit(ReadPreference.PRIMARY) - - @client_context.require_secondaries_count(1) - def test_kill_cursor_explicit_secondary(self): - self._test_kill_cursor_explicit(ReadPreference.SECONDARY) - - @client_context.require_secondaries_count(1) - def test_not_master_error(self): - secondary_address = one(self.secondaries) - direct_client = single_client(*secondary_address) - - with self.assertRaises(NotMasterError): - direct_client.pymongo_test.collection.insert_one({}) - - db = direct_client.get_database( - "pymongo_test", write_concern=WriteConcern(w=0)) - with self.assertRaises(NotMasterError): - db.collection.insert_one({}) - - -class TestReplicaSetWireVersion(MockClientTest): - - @client_context.require_connection - @client_context.require_no_auth - def test_wire_version(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='a:1', - replicaSet='rs', - connect=False) - - c.set_wire_version_range('a:1', 3, 7) - c.set_wire_version_range('b:2', 2, 3) - c.set_wire_version_range('c:3', 3, 4) - c.db.command('ismaster') # Connect. - - # A secondary doesn't overlap with us. 
- c.set_wire_version_range('b:2', - MAX_SUPPORTED_WIRE_VERSION + 1, - MAX_SUPPORTED_WIRE_VERSION + 2) - - def raises_configuration_error(): - try: - c.db.collection.find_one() - return False - except ConfigurationError: - return True - - wait_until(raises_configuration_error, - 'notice we are incompatible with server') - - self.assertRaises(ConfigurationError, c.db.collection.insert_one, {}) - - -class TestReplicaSetClientInternalIPs(MockClientTest): - - @client_context.require_connection - def test_connect_with_internal_ips(self): - # Client is passed an IP it can reach, 'a:1', but the RS config - # only contains unreachable IPs like 'internal-ip'. PYTHON-608. - with self.assertRaises(AutoReconnect) as context: - connected(MockClient( - standalones=[], - members=['a:1'], - mongoses=[], - ismaster_hosts=['internal-ip:27017'], - host='a:1', - replicaSet='rs', - serverSelectionTimeoutMS=100)) - - self.assertEqual( - "Could not reach any servers in [('internal-ip', 27017)]." - " Replica set is configured with internal hostnames or IPs?", - str(context.exception)) - -class TestReplicaSetClientMaxWriteBatchSize(MockClientTest): - - @client_context.require_connection - def test_max_write_batch_size(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs', - connect=False) - - c.set_max_write_batch_size('a:1', 1) - c.set_max_write_batch_size('b:2', 2) - - # Uses primary's max batch size. - self.assertEqual(c.max_write_batch_size, 1) - - # b becomes primary. - c.mock_primary = 'b:2' - wait_until(lambda: c.max_write_batch_size == 2, - 'update max_write_batch_size') - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 347bbab491..1dae0aea86 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -13,19 +13,22 @@ # limitations under the License. """Test clients and replica set configuration changes, using mocks.""" +from __future__ import annotations import sys sys.path[0:0] = [""] -from pymongo.errors import ConnectionFailure, AutoReconnect -from pymongo import ReadPreference -from test import unittest, client_context, client_knobs, MockClientTest +from test import MockClientTest, client_context, client_knobs, unittest from test.pymongo_mocks import MockClient from test.utils import wait_until +from pymongo import ReadPreference +from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError + @client_context.require_connection +@client_context.require_no_load_balancer def setUpModule(): pass @@ -37,56 +40,53 @@ class TestSecondaryBecomesStandalone(MockClientTest): def test_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs', - serverSelectionTimeoutMS=100) + host="a:1,b:2,c:3", + replicaSet="rs", + serverSelectionTimeoutMS=100, + connect=False, + ) self.addCleanup(c.close) - # MongoClient connects to primary by default. - wait_until(lambda: c.address is not None, 'connect to primary') - self.assertEqual(c.address, ('a', 1)) - # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") # Fail over. - c.kill_host('a:1') - c.kill_host('b:2') - - # Force reconnect. 
- c.close() - - with self.assertRaises(AutoReconnect): - c.db.command('ismaster') + c.kill_host("a:1") + c.kill_host("b:2") + with self.assertRaises(ServerSelectionTimeoutError): + c.db.command("ping") self.assertEqual(c.address, None) + # Client can still discover the primary node + c.revive_host("a:1") + wait_until(lambda: c.address is not None, "connect to primary") + self.assertEqual(c.address, ("a", 1)) + def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, - 'discover host "b"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, - 'discover host "c"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update the list of secondaries') + wait_until(lambda: {("b", 2)} == c.secondaries, "update the list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSecondaryRemoved(MockClientTest): @@ -95,21 +95,21 @@ class TestSecondaryRemoved(MockClientTest): def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is removed. - c.mock_ismaster_hosts.remove('c:3') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update list of secondaries') + c.mock_hello_hosts.remove("c:3") + wait_until(lambda: {("b", 2)} == c.secondaries, "update list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSocketError(MockClientTest): @@ -118,20 +118,22 @@ def test_socket_error_marks_member_down(self): with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], - members=['a:1', 'b:2'], + members=["a:1", "b:2"], mongoses=[], - host='a:1', - replicaSet='rs') + host="a:1", + replicaSet="rs", + serverSelectionTimeoutMS=100, + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # b now raises socket.error. 
- c.mock_down_hosts.append('b:2') + c.mock_down_hosts.append("b:2") self.assertRaises( ConnectionFailure, - c.db.collection.with_options( - read_preference=ReadPreference.SECONDARY).find_one) + c.db.collection.with_options(read_preference=ReadPreference.SECONDARY).find_one, + ) self.assertEqual(1, len(c.nodes)) @@ -139,52 +141,44 @@ def test_socket_error_marks_member_down(self): class TestSecondaryAdded(MockClientTest): def test_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # MongoClient connects to primary by default. - self.assertEqual(c.address, ('a', 1)) - self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes) + self.assertEqual(c.address, ("a", 1)) + self.assertEqual({("a", 1), ("b", 2)}, c.nodes) # C is added. - c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - c.close() - c.db.command('ismaster') + c.db.command("ping") - self.assertEqual(c.address, ('a', 1)) + self.assertEqual(c.address, ("a", 1)) - wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes, - 'reconnect to both secondaries') + wait_until( + lambda: {("a", 1), ("b", 2), ("c", 3)} == c.nodes, "reconnect to both secondaries" + ) def test_replica_set_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: ('a', 1) == c.primary, 'discover the primary') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'discover the secondary') + wait_until(lambda: c.primary == ("a", 1), "discover the primary") + wait_until(lambda: {("b", 2)} == c.secondaries, "discover the secondary") # C is added. - c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries, - 'discover the new secondary') + wait_until(lambda: {("b", 2), ("c", 3)} == c.secondaries, "discover the new secondary") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) if __name__ == "__main__": diff --git a/test/test_results.py b/test/test_results.py new file mode 100644 index 0000000000..19e086a9a5 --- /dev/null +++ b/test/test_results.py @@ -0,0 +1,138 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
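The new test_results.py that follows pins down the acknowledged/unacknowledged contract of the result classes: count-style properties are only readable after acknowledged writes and raise InvalidOperation otherwise. A minimal sketch of that contract using DeleteResult directly, constructing result objects by hand exactly as the tests do:

from pymongo.errors import InvalidOperation
from pymongo.results import DeleteResult

result = DeleteResult({"n": 5}, acknowledged=True)
print(result.deleted_count)  # 5

result = DeleteResult({"n": 5}, acknowledged=False)
try:
    result.deleted_count
except InvalidOperation:
    print("deleted_count is unavailable for unacknowledged writes")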
+ +"""Test results module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.errors import InvalidOperation +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) + + +class TestResults(unittest.TestCase): + def repr_test(self, cls, result_arg): + for acknowledged in (True, False): + result = cls(result_arg, acknowledged) + expected_repr = "%s(%r, acknowledged=%r)" % (cls.__name__, result_arg, acknowledged) + self.assertEqual(acknowledged, result.acknowledged) + self.assertEqual(expected_repr, repr(result)) + + def test_bulk_write_result(self): + raw_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 1, + "nUpserted": 2, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [ + {"index": 5, "_id": 1}, + {"index": 9, "_id": 2}, + ], + } + self.repr_test(BulkWriteResult, raw_result) + + result = BulkWriteResult(raw_result, True) + self.assertEqual(raw_result, result.bulk_api_result) + self.assertEqual(raw_result["nInserted"], result.inserted_count) + self.assertEqual(raw_result["nMatched"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["nRemoved"], result.deleted_count) + self.assertEqual(raw_result["nUpserted"], result.upserted_count) + self.assertEqual({5: 1, 9: 2}, result.upserted_ids) + + result = BulkWriteResult(raw_result, False) + self.assertEqual(raw_result, result.bulk_api_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.inserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_ids + + def test_delete_result(self): + raw_result = {"n": 5} + self.repr_test(DeleteResult, {"n": 0}) + + result = DeleteResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.deleted_count) + + result = DeleteResult(raw_result, False) + self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + + def test_insert_many_result(self): + inserted_ids = [1, 2, 3] + self.repr_test(InsertManyResult, inserted_ids) + + for acknowledged in (True, False): + result = InsertManyResult(inserted_ids, acknowledged) + self.assertEqual(inserted_ids, result.inserted_ids) + + def test_insert_one_result(self): + self.repr_test(InsertOneResult, 0) + + for acknowledged in (True, False): + result = InsertOneResult(0, acknowledged) + self.assertEqual(0, result.inserted_id) + + def test_update_result(self): + raw_result = { + "n": 1, + "nModified": 1, + "upserted": None, + } + self.repr_test(UpdateResult, raw_result) + + result = UpdateResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["upserted"], result.upserted_id) + + result = UpdateResult(raw_result, False) + 
self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_id + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index cce6f0a451..8779ea1ed8 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -13,89 +13,122 @@ # limitations under the License. """Test retryable reads spec.""" +from __future__ import annotations import os +import pprint import sys +import threading sys.path[0:0] = [""] -from pymongo.mongo_client import MongoClient -from pymongo.write_concern import WriteConcern - -from test import unittest, client_context, PyMongoTestCase -from test.utils import TestCreator +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + client_knobs, + unittest, +) +from test.utils import ( + CMAPListener, + OvertCommandListener, + SpecTestCreator, + rs_or_single_client, +) from test.utils_spec_runner import SpecRunner +from pymongo.mongo_client import MongoClient +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.write_concern import WriteConcern # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_reads') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") class TestClientOptions(PyMongoTestCase): def test_default(self): client = MongoClient(connect=False) - self.assertEqual(client.retry_reads, True) + self.assertEqual(client.options.retry_reads, True) def test_kwargs(self): client = MongoClient(retryReads=True, connect=False) - self.assertEqual(client.retry_reads, True) + self.assertEqual(client.options.retry_reads, True) client = MongoClient(retryReads=False, connect=False) - self.assertEqual(client.retry_reads, False) + self.assertEqual(client.options.retry_reads, False) def test_uri(self): - client = MongoClient('mongodb://h/?retryReads=true', connect=False) - self.assertEqual(client.retry_reads, True) - client = MongoClient('mongodb://h/?retryReads=false', connect=False) - self.assertEqual(client.retry_reads, False) + client = MongoClient("mongodb://h/?retryReads=true", connect=False) + self.assertEqual(client.options.retry_reads, True) + client = MongoClient("mongodb://h/?retryReads=false", connect=False) + self.assertEqual(client.options.retry_reads, False) class TestSpec(SpecRunner): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True @classmethod - @client_context.require_version_min(4, 0) + @client_context.require_failCommand_fail_point # TODO: remove this once PYTHON-1948 is done. 
@client_context.require_no_mmap def setUpClass(cls): - super(TestSpec, cls).setUpClass() - if client_context.is_mongos and client_context.version[:2] <= (4, 0): - raise unittest.SkipTest("4.0 mongos does not support failCommand") + super().setUpClass() def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) - skip_names = [ - 'listCollectionObjects', 'listIndexNames', 'listDatabaseObjects'] + super().maybe_skip_scenario(test) + skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] for name in skip_names: - if name.lower() in test['description'].lower(): - self.skipTest('PyMongo does not support %s' % (name,)) - - # Skip changeStream related tests on MMAPv1. - test_name = self.id().rsplit('.')[-1] - if ('changestream' in test_name.lower() and - client_context.storage_engine == 'mmapv1'): - self.skipTest("MMAPv1 does not support change streams.") + if name.lower() in test["description"].lower(): + self.skipTest(f"PyMongo does not support {name}") + + # Serverless does not support $out and collation. + if client_context.serverless: + for operation in test["operations"]: + if operation["name"] == "aggregate": + for stage in operation["arguments"]["pipeline"]: + if "$out" in stage: + self.skipTest("MongoDB Serverless does not support $out") + if "collation" in operation["arguments"]: + self.skipTest("MongoDB Serverless does not support collations") + + # Skip changeStream related tests on MMAPv1 and serverless. + test_name = self.id().rsplit(".")[-1] + if "changestream" in test_name.lower(): + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support change streams.") + if client_context.serverless: + self.skipTest("Serverless does not support change streams.") def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" - if 'bucket_name' in scenario_def: - return scenario_def['bucket_name'] - return super(TestSpec, self).get_scenario_coll_name(scenario_def) + if "bucket_name" in scenario_def: + return scenario_def["bucket_name"] + return super().get_scenario_coll_name(scenario_def) def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" - if 'bucket_name' in scenario_def: + if "bucket_name" in scenario_def: + data = scenario_def["data"] db_name = self.get_scenario_db_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) - # Create a bucket for the retryable reads GridFS tests. - client_context.client.drop_database(db_name) - if scenario_def['data']: - data = scenario_def['data'] - # Load data. - db['fs.chunks'].insert_many(data['fs.chunks']) - db['fs.files'].insert_many(data['fs.files']) + db = client_context.client[db_name] + # Create a bucket for the retryable reads GridFS tests with as few + # majority writes as possible. 
+ wc = WriteConcern(w="majority") + if data: + db["fs.chunks"].drop() + db["fs.files"].drop() + db["fs.chunks"].insert_many(data["fs.chunks"]) + db.get_collection("fs.files", write_concern=wc).insert_many(data["fs.files"]) + else: + db.get_collection("fs.chunks").drop() + db.get_collection("fs.files", write_concern=wc).drop() else: - super(TestSpec, self).setup_scenario(scenario_def) + super().setup_scenario(scenario_def) def create_test(scenario_def, test, name): @@ -106,8 +139,87 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestSpec, _TEST_PATH) test_creator.create_tests() + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flakey on PyPy") + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) + self.addCleanup(client.close) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. 
+ started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py new file mode 100644 index 0000000000..69bee081a5 --- /dev/null +++ b/test/test_retryable_reads_unified.py @@ -0,0 +1,33 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Reads unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 7f9a429b1e..2da6f53f4b 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -13,69 +13,103 @@ # limitations under the License. 
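Before the retryable-writes changes below, a sketch of the failCommand fail point pattern these tests lean on; the fail point document format matches the one used in InsertEventListener further down, and this requires a replica set started with test commands enabled (enableTestCommands=1):

from pymongo import MongoClient

client = MongoClient()  # hypothetical test deployment; retryWrites defaults to True
client.admin.command(
    {
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},
        "data": {
            "errorCode": 10107,  # NotWritablePrimary, a retryable write error
            "errorLabels": ["RetryableWriteError"],
            "failCommands": ["insert"],
        },
    }
)
# The first insert attempt fails with code 10107; the driver retries once
# with the same transaction number and the write succeeds.
client.pymongo_test.test.insert_one({})
client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})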
"""Test retryable writes.""" +from __future__ import annotations import copy import os +import pprint import sys +import threading sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, client_knobs, unittest +from test.utils import ( + CMAPListener, + DeprecationFilter, + EventListener, + OvertCommandListener, + SpecTestCreator, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner +from test.version import Version + +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 -from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument from bson.son import SON - - -from pymongo.errors import (ConnectionFailure, - OperationFailure, - ServerSelectionTimeoutError) +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) from pymongo.mongo_client import MongoClient -from pymongo.operations import (InsertOne, - DeleteMany, - DeleteOne, - ReplaceOne, - UpdateMany, - UpdateOne) +from pymongo.monitoring import ( + CommandSucceededEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import unittest, client_context, IntegrationTest, SkipTest, client_knobs -from test.test_crud_v1 import check_result as crud_v1_check_result -from test.utils import (rs_or_single_client, - DeprecationFilter, - OvertCommandListener, - TestCreator) -from test.utils_spec_runner import SpecRunner - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) class TestAllScenarios(SpecRunner): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True def get_object_name(self, op): - return op.get('object', 'collection') + return op.get("object", "collection") def get_scenario_db_name(self, scenario_def): - return scenario_def.get('database_name', 'pymongo_test') + return scenario_def.get("database_name", "pymongo_test") def get_scenario_coll_name(self, scenario_def): - return scenario_def.get('collection_name', 'test') + return scenario_def.get("collection_name", "test") def run_test_ops(self, sessions, collection, test): - outcome = test['outcome'] - should_fail = outcome.get('error') - result = None - error = None - try: - result = self.run_operation( - sessions, collection, test['operation']) - except (ConnectionFailure, OperationFailure) as exc: - error = exc - if should_fail: - self.assertIsNotNone(error, 'should have raised an error') - else: - self.assertIsNone(error) - crud_v1_check_result(self, outcome['result'], result) + # Transform retryable writes spec format into transactions. 
+ operation = test["operation"] + outcome = test["outcome"] + if "error" in outcome: + operation["error"] = outcome["error"] + if "result" in outcome: + operation["result"] = outcome["result"] + test["operations"] = [operation] + super().run_test_ops(sessions, collection, test) def create_test(scenario_def, test, name): @@ -86,105 +120,72 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) + +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() -def _retryable_single_statement_ops(coll): +def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), - (coll.bulk_write, [[InsertOne({}), - InsertOne({})]], {'ordered': False}), - (coll.bulk_write, [[ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateOne({}, {'$set': {'a': 1}})]], {}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), - (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {'a': 3}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), - (coll.find_one_and_delete, [{}, {}], {}), - ] - - -def retryable_single_statement_ops(coll): - return _retryable_single_statement_ops(coll) + [ - # Deprecated methods. - # Insert with single or multiple documents. - (coll.insert, [{}], {}), - (coll.insert, [[{}]], {}), - (coll.insert, [[{}, {}]], {}), - # Save with and without an _id. - (coll.save, [{}], {}), - (coll.save, [{'_id': ObjectId()}], {}), - # Non-multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {}), - # Non-multi remove. - (coll.remove, [{}], {'multi': False}), - # Replace. - (coll.find_and_modify, [{}, {'a': 3}], {}), - # Update. - (coll.find_and_modify, [{}, {'$set': {'a': 1}}], {}), - # Delete. - (coll.find_and_modify, [{}, {}], {'remove': True}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), ] def non_retryable_single_statement_ops(coll): return [ - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateMany({}, {'$set': {'a': 1}})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_many, [{}], {}), - # Deprecated methods. - # Multi remove. - (coll.remove, [{}], {}), - # Multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {'multi': True}), - # Unacknowledged deprecated methods. - (coll.insert, [{}], {'w': 0}), - # Unacknowledged Non-multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {'w': 0}), - # Unacknowledged Non-multi remove. 
- (coll.remove, [{}], {'multi': False, 'w': 0}), - # Unacknowledged Replace. - (coll.find_and_modify, [{}, {'a': 3}], {'writeConcern': {'w': 0}}), - # Unacknowledged Update. - (coll.find_and_modify, [{}, {'$set': {'a': 1}}], - {'writeConcern': {'w': 0}}), - # Unacknowledged Delete. - (coll.find_and_modify, [{}, {}], - {'remove': True, 'writeConcern': {'w': 0}}), ] class IgnoreDeprecationsTest(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + deprecation_filter: DeprecationFilter @classmethod def setUpClass(cls): - super(IgnoreDeprecationsTest, cls).setUpClass() + super().setUpClass() cls.deprecation_filter = DeprecationFilter() @classmethod def tearDownClass(cls): cls.deprecation_filter.stop() - super(IgnoreDeprecationsTest, cls).tearDownClass() + super().tearDownClass() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): + knobs: client_knobs @classmethod def setUpClass(cls): - super(TestRetryableWritesMMAPv1, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.client = rs_or_single_client(retryWrites=True) cls.db = cls.client.pymongo_test @@ -192,161 +193,165 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.knobs.disable() + cls.client.close() + super().tearDownClass() - @client_context.require_version_min(3, 5) @client_context.require_no_standalone def test_actionable_error_message(self): - if client_context.storage_engine != 'mmapv1': - raise SkipTest('This cluster is not running MMAPv1') - - expected_msg = ("This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string.") - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): + if client_context.storage_engine != "mmapv1": + raise SkipTest("This cluster is not running MMAPv1") + + expected_msg = ( + "This MongoDB deployment does not support retryable " + "writes. Please add retryWrites=false to your " + "connection string." + ) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): with self.assertRaisesRegex(OperationFailure, expected_msg): method(*args, **kwargs) class TestRetryableWrites(IgnoreDeprecationsTest): + listener: OvertCommandListener + knobs: client_knobs @classmethod @client_context.require_no_mmap def setUpClass(cls): - super(TestRetryableWrites, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. 
- cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client( - retryWrites=True, event_listeners=[cls.listener]) + cls.client = rs_or_single_client(retryWrites=True, event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @classmethod def tearDownClass(cls): cls.knobs.disable() - super(TestRetryableWrites, cls).tearDownClass() + cls.client.close() + super().tearDownClass() def setUp(self): - if (client_context.version.at_least(3, 5) and client_context.is_rs - and client_context.test_commands_enabled): - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'alwaysOn')])) + if client_context.is_rs and client_context.test_commands_enabled: + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) def tearDown(self): - if (client_context.version.at_least(3, 5) and client_context.is_rs - and client_context.test_commands_enabled): - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'off')])) + if client_context.is_rs and client_context.test_commands_enabled: + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=False, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - listener.results.clear() + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() method(*args, **kwargs) - for event in listener.results['started']: + for event in listener.started_events: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) - @client_context.require_version_min(3, 5) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - self.listener.results.clear() + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() method(*args, **kwargs) - commands_started = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), 1, msg) + commands_started = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), 1, msg) first_attempt = commands_started[0] self.assertIn( - 'lsid', first_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - initial_session_id = first_attempt.command['lsid'] + "lsid", + first_attempt.command, + f"{msg} sent no lsid with {first_attempt.command_name}", + ) + initial_session_id = first_attempt.command["lsid"] self.assertIn( - 'txnNumber', first_attempt.command, - '%s sent no 
txnNumber with %s' % ( - msg, first_attempt.command_name)) + "txnNumber", + first_attempt.command, + f"{msg} sent no txnNumber with {first_attempt.command_name}", + ) # There should be no retry when the failpoint is not active. - if (client_context.is_mongos or - not client_context.test_commands_enabled): + if client_context.is_mongos or not client_context.test_commands_enabled: self.assertEqual(len(commands_started), 1) continue - initial_transaction_id = first_attempt.command['txnNumber'] + initial_transaction_id = first_attempt.command["txnNumber"] retry_attempt = commands_started[1] self.assertIn( - 'lsid', retry_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - self.assertEqual( - retry_attempt.command['lsid'], initial_session_id, msg) + "lsid", + retry_attempt.command, + f"{msg} sent no lsid with {first_attempt.command_name}", + ) + self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) self.assertIn( - 'txnNumber', retry_attempt.command, - '%s sent no txnNumber with %s' % ( - msg, first_attempt.command_name)) - self.assertEqual(retry_attempt.command['txnNumber'], - initial_transaction_id, msg) + "txnNumber", + retry_attempt.command, + f"{msg} sent no txnNumber with {first_attempt.command_name}", + ) + self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) def test_supported_single_statement_unsupported_cluster(self): - if client_context.version.at_least(3, 5) and ( - client_context.is_rs or client_context.is_mongos): - raise SkipTest('This cluster supports retryable writes') - - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - self.listener.results.clear() + if client_context.is_rs or client_context.is_mongos: + raise SkipTest("This cluster supports retryable writes") + + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() method(*args, **kwargs) - for event in self.listener.results['started']: + for event in self.listener.started_events: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) def test_unsupported_single_statement(self): coll = self.db.retryable_write_test coll.insert_many([{}, {}]) coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) - for method, args, kwargs in (non_retryable_single_statement_ops(coll) + - retryable_single_statement_ops(coll_w0)): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - self.listener.results.clear() + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() method(*args, **kwargs) - started_events = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), - len(started_events), msg) - self.assertEqual(len(self.listener.results['failed']), 0, msg) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) for event in started_events: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + f"{msg} sent 
txnNumber with {event.command_name}", + ) def test_server_selection_timeout_not_retried(self): """A ServerSelectionTimeoutError is not retried.""" listener = OvertCommandListener() client = MongoClient( - 'somedomainthatdoesntexist.org', + "somedomainthatdoesntexist.org", serverSelectionTimeoutMS=1, - retryWrites=True, event_listeners=[listener]) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - listener.results.clear() + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 0, msg) + self.assertEqual(len(listener.started_events), 0, msg) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_retry_timeout_raises_original_error(self): @@ -354,8 +359,7 @@ def test_retry_timeout_raises_original_error(self): original error. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -364,44 +368,44 @@ def mock_select_server(*args, **kwargs): server = select_server(*args, **kwargs) def raise_error(*args, **kwargs): - raise ServerSelectionTimeoutError( - 'No primary available for writes') + raise ServerSelectionTimeoutError("No primary available for writes") + # Raise ServerSelectionTimeout on the retry attempt. 
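+ # Informal sketch of the call sequence this mock sets up (a restatement
+ # of the test's intent, not driver documentation): the first
+ # select_server() call runs the real implementation and swaps in
+ # raise_error, so the initial write is sent exactly once; the retry's
+ # selection then raises, and the assertions below verify that one
+ # command started and a ConnectionFailure surfaced to the caller.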
topology.select_server = raise_error return server - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) - listener.results.clear() + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) + self.assertEqual(len(listener.started_events), 1, msg) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting(self): """Test retry succeeds after failures during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.listener.results.clear() - bulk_result = coll.bulk_write([ - InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - UpdateOne({'_id': 1, 'l': large}, - {'$unset': {'l': 1}, '$inc': {'count': 1}}), - UpdateOne({'_id': 2, 'l': large}, {'$set': {'foo': 'bar'}}), - DeleteOne({'l': large}), - DeleteOne({'l': large})]) + self.listener.reset() + bulk_result = coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. - self.assertEqual(len(self.listener.results['started']), 6) - self.assertEqual(coll.find_one(), {'_id': 1, 'count': 1}) + self.assertEqual(len(self.listener.started_events), 6) + self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { "writeErrors": [], @@ -415,52 +419,232 @@ def test_batch_splitting(self): } self.assertEqual(bulk_result.bulk_api_result, expected_result) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting_retry_fails(self): """Test retry fails during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', {'skip': 3}), # The number of _documents_ to skip. - ('data', {'failBeforeCommitExceptionCode': 1})])) - self.listener.results.clear() + self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. 
+ ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) + self.listener.reset() with self.client.start_session() as session: initial_txn = session._server_session._transaction_id try: - coll.bulk_write([InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - InsertOne({'_id': 4, 'l': large})], - session=session) + coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) except ConnectionFailure: pass else: self.fail("bulk_write should have failed") - started = self.listener.results['started'] + started = self.listener.started_events self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results['succeeded']), 1) + self.assertEqual(len(self.listener.succeeded_events), 1) expected_txn = Int64(initial_txn + 1) - self.assertEqual(started[0].command['txnNumber'], expected_txn) - self.assertEqual(started[0].command['lsid'], session.session_id) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) expected_txn = Int64(initial_txn + 2) - self.assertEqual(started[1].command['txnNumber'], expected_txn) - self.assertEqual(started[1].command['lsid'], session.session_id) - started[1].command.pop('$clusterTime') - started[2].command.pop('$clusterTime') + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") self.assertEqual(started[1].command, started[2].command) final_txn = session._server_session._transaction_id self.assertEqual(final_txn, expected_txn) - self.assertEqual(coll.find_one(projection={'_id': True}), {'_id': 1}) + self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) + + +class TestWriteConcernError(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + fail_insert: dict + + @classmethod + @client_context.require_replica_set + @client_context.require_no_mmap + @client_context.require_failCommand_fail_point + def setUpClass(cls): + super().setUpClass() + cls.fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + + @client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + self.addCleanup(client.close) + + # Ensure collection exists. + client.pymongo_test.testcoll.insert_one({}) + + with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) + + if client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. 
+ self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) + + @client_context.require_version_min(4, 4) + def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + with self.fail_point(self.fail_insert): + with self.client.start_session() as s: + s._start_retryable_write() + result = self.client.pymongo_test.command( + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._server_session.transaction_id, + session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument + ), + ) + + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @client_context.require_failCommand_blockConnection + @client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) + self.addCleanup(client.close) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. 
+ started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + @client_context.require_failCommand_fail_point + @client_context.require_replica_set + @client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client.test.test.drop() + self.addCleanup(client.close) + cmd_listener.reset() + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): - @client_context.require_version_min(3, 6) @client_context.require_replica_set @client_context.require_no_mmap def test_increment_transaction_id_without_sending_command(self): @@ -468,8 +652,8 @@ def test_increment_transaction_id_without_sending_command(self): the first attempt fails before sending the command. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -477,28 +661,27 @@ def raise_connection_err_select_server(*args, **kwargs): # Raise ConnectionFailure on the first attempt and perform # normal selection on the retry attempt. topology.select_server = select_server - raise ConnectionFailure('Connection refused') + raise ConnectionFailure("Connection refused") - for method, args, kwargs in _retryable_single_statement_ops( - client.db.retryable_write_test): - listener.results.clear() + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + listener.reset() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) - kwargs['session'] = session - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + kwargs["session"] = session + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" initial_txn_id = session._server_session.transaction_id # Each operation should fail on the first attempt and succeed # on the second. 
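+ # Timeline assumed by the assertions below: attempt 1 raises
+ # ConnectionFailure from select_server before any bytes reach the wire,
+ # yet the session still consumes a transaction id; attempt 2 selects
+ # normally and sends the command, so the single started event must
+ # carry txnNumber == initial_txn_id + 1.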
method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) - retry_cmd = listener.results['started'][0].command - sent_txn_id = retry_cmd['txnNumber'] + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command + sent_txn_id = retry_cmd["txnNumber"] final_txn_id = session._server_session.transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) self.assertEqual(sent_txn_id, final_txn_id, msg) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py new file mode 100644 index 0000000000..da16166ec6 --- /dev/null +++ b/test/test_retryable_writes_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_run_command.py b/test/test_run_command.py new file mode 100644 index 0000000000..486a4c7e39 --- /dev/null +++ b/test/test_run_command.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +import os +import unittest +from test.unified_format import generate_test_classes + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "run_command") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_saslprep.py b/test/test_saslprep.py index 6fdf0452d8..e825cafa35 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -11,33 +11,34 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import sys -import warnings sys.path[0:0] = [""] -from pymongo.saslprep import saslprep from test import unittest -class TestSASLprep(unittest.TestCase): +from pymongo.saslprep import saslprep + +class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: import stringprep except ImportError: - self.assertRaises(TypeError, saslprep, u"anything...") + self.assertRaises(TypeError, saslprep, "anything...") # Bytes strings are ignored. self.assertEqual(saslprep(b"user"), b"user") else: # Examples from RFC4013, Section 3. 
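+ # Informal gloss of the RFC 4013 cases exercised below (code points are
+ # from the RFC; the one-line summaries are ours):
+ #   "I\u00ADX" -> "IX"  soft hyphen is mapped to nothing
+ #   "\u00AA"   -> "a"   NFKC normalization of FEMININE ORDINAL INDICATOR
+ #   "\u2168"   -> "IX"  NFKC normalization of ROMAN NUMERAL NINE
+ #   "\u0007"   -> ValueError, prohibited control character
+ #   "\u0627\u0031" -> ValueError, RandALCat character followed by a
+ #                     digit violates the bidi rules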
- self.assertEqual(saslprep(u"I\u00ADX"), u"IX") - self.assertEqual(saslprep(u"user"), u"user") - self.assertEqual(saslprep(u"USER"), u"USER") - self.assertEqual(saslprep(u"\u00AA"), u"a") - self.assertEqual(saslprep(u"\u2168"), u"IX") - self.assertRaises(ValueError, saslprep, u"\u0007") - self.assertRaises(ValueError, saslprep, u"\u0627\u0031") + self.assertEqual(saslprep("I\u00ADX"), "IX") + self.assertEqual(saslprep("user"), "user") + self.assertEqual(saslprep("USER"), "USER") + self.assertEqual(saslprep("\u00AA"), "a") + self.assertEqual(saslprep("\u2168"), "IX") + self.assertRaises(ValueError, saslprep, "\u0007") + self.assertRaises(ValueError, saslprep, "\u0627\u0031") # Bytes strings are ignored. self.assertEqual(saslprep(b"user"), b"user") diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index cb5ebcd5e3..f687eab313 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -13,54 +13,55 @@ # limitations under the License. """Run the sdam monitoring spec tests.""" +from __future__ import annotations import json import os import sys -import weakref +import time sys.path[0:0] = [""] +from test import IntegrationTest, client_context, client_knobs, unittest +from test.utils import ( + ServerAndTopologyEventListener, + rs_or_single_client, + server_name_to_type, + wait_until, +) + from bson.json_util import object_hook -from pymongo import monitoring -from pymongo import periodic_executor -from pymongo.ismaster import IsMaster +from pymongo import MongoClient, monitoring +from pymongo.collection import Collection +from pymongo.common import clean_node +from pymongo.errors import ConnectionFailure, NotPrimaryError +from pymongo.hello import Hello from pymongo.monitor import Monitor -from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, client_knobs -from test.utils import (ServerAndTopologyEventListener, - single_client, - wait_until) # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'sdam_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") def compare_server_descriptions(expected, actual): - if ((not expected['address'] == "%s:%s" % actual.address) or - (not SERVER_TYPE.__getattribute__(expected['type']) == - actual.server_type)): + if (expected["address"] != "{}:{}".format(*actual.address)) or ( + server_name_to_type(expected["type"]) != actual.server_type + ): return False - expected_hosts = set( - expected['arbiters'] + expected['passives'] + expected['hosts']) - return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) + expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} def compare_topology_descriptions(expected, actual): - if not (TOPOLOGY_TYPE.__getattribute__( - expected['topologyType']) == actual.topology_type): + if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type: return False - expected = expected['servers'] + expected = expected["servers"] actual = actual.server_descriptions() if len(expected) != len(actual): return False for exp_server in expected: - for address, actual_server in actual.items(): + for _address, actual_server in actual.items(): if compare_server_descriptions(exp_server, actual_server): break else: @@ -78,70 +79,74 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): - return False, "Expected ServerOpeningEvent, got %s" % ( - actual.__class__) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, - "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerOpeningEvent published with wrong address (expected" + " {}, got {}".format(expected["address"], actual.server_address), + ) elif expected_type == "server_description_changed_event": if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): - return (False, - "Expected ServerDescriptionChangedEvent, got %s" % ( - actual.__class__)) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) - + return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerDescriptionChangedEvent has wrong address" + " (expected {}, got {}".format(expected["address"], actual.server_address), + ) + + if not compare_server_descriptions(expected["newDescription"], actual.new_description): + return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent") if not compare_server_descriptions( - expected['newDescription'], actual.new_description): - return (False, "New ServerDescription incorrect in" - " ServerDescriptionChangedEvent") - if not compare_server_descriptions(expected['previousDescription'], - actual.previous_description): - return (False, "Previous ServerDescription incorrect in" - " ServerDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description 
+ ): + return ( + False, + "Previous ServerDescription incorrect in ServerDescriptionChangedEvent", + ) elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): - return False, "Expected ServerClosedEvent, got %s" % ( - actual.__class__) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerClosedEvent published with wrong address" + " (expected {}, got {}".format(expected["address"], actual.server_address), + ) elif expected_type == "topology_opening_event": if not isinstance(actual, monitoring.TopologyOpenedEvent): - return False, "Expected TopologyOpeningEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyOpeningEvent, got %s" % (actual.__class__) elif expected_type == "topology_description_changed_event": if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): - return (False, "Expected TopologyDescriptionChangedEvent," - " got %s" % (actual.__class__)) - if not compare_topology_descriptions(expected['newDescription'], - actual.new_description): - return (False, "New TopologyDescription incorrect in " - "TopologyDescriptionChangedEvent") + return ( + False, + "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__), + ) + if not compare_topology_descriptions(expected["newDescription"], actual.new_description): + return ( + False, + "New TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) if not compare_topology_descriptions( - expected['previousDescription'], - actual.previous_description): - return (False, "Previous TopologyDescription incorrect in" - " TopologyDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) elif expected_type == "topology_closed_event": if not isinstance(actual, monitoring.TopologyClosedEvent): - return False, "Expected TopologyClosedEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % ( - expected_type, actual) + return False, f"Incorrect event: expected {expected_type}, actual {actual}" return True, "" @@ -149,12 +154,10 @@ def compare_events(expected_dict, actual): def compare_multiple_events(i, expected_results, actual_results): events_in_a_row = [] j = i - while(j < len(expected_results) and isinstance( - actual_results[j], - actual_results[i].__class__)): + while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__): events_in_a_row.append(actual_results[j]) j += 1 - message = '' + message = "" for event in events_in_a_row: for k in range(i, j): passed, message = compare_events(expected_results[k], event) @@ -163,103 +166,82 @@ def compare_multiple_events(i, expected_results, actual_results): break else: return i, False, message - return j, True, '' + return j, True, "" -class TestAllScenarios(unittest.TestCase): - - @classmethod - @client_context.require_connection - def setUp(cls): - cls.all_listener = ServerAndTopologyEventListener() +class TestAllScenarios(IntegrationTest): + def setUp(self): + super().setUp() + 
self.all_listener = ServerAndTopologyEventListener() def create_test(scenario_def): def run_scenario(self): - responses = (r for r in scenario_def['phases'][0]['responses']) - with client_knobs(events_queue_frequency=0.1): - class MockMonitor(Monitor): - def __init__(self, server_description, topology, pool, - topology_settings): - """Have to copy entire constructor from Monitor so that we - can override _run and change the periodic executor's - interval.""" - - self._server_description = server_description - self._pool = pool - self._settings = topology_settings - self._avg_round_trip_time = MovingAverage() - options = self._settings._pool_options - self._listeners = options.event_listeners - self._publish = self._listeners is not None - - def target(): - monitor = self_ref() - if monitor is None: - return False - MockMonitor._run(monitor) # Change target to subclass - return True - - # Shorten interval - executor = periodic_executor.PeriodicExecutor( - interval=0.1, - min_interval=0.1, - target=target, - name="pymongo_server_monitor_thread") - self._executor = executor - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) - - def _run(self): - try: - if self._server_description.address != ('a', 27017): - # Because PyMongo doesn't keep information about - # the order of addresses, we might accidentally - # start a MockMonitor on the wrong server first, - # so we need to only mock responses for the server - # the test's response is supposed to come from. - return - response = next(responses)[1] - isMaster = IsMaster(response) - self._server_description = ServerDescription( - address=self._server_description.address, - ismaster=isMaster) - self._topology.on_change(self._server_description) - except (ReferenceError, StopIteration): - # Topology was garbage-collected. 
- self.close() - - m = single_client(h=scenario_def['uri'], p=27017, - event_listeners=(self.all_listener,), - _monitor_class=MockMonitor) - - expected_results = scenario_def['phases'][0]['outcome']['events'] - - expected_len = len(expected_results) - wait_until(lambda: len(self.all_listener.results) >= expected_len, - "publish all events", timeout=15) + _run_scenario(self) - try: - i = 0 - while i < expected_len: - result = self.all_listener.results[i] if len( - self.all_listener.results) > i else None - # The order of ServerOpening/ClosedEvents doesn't matter - if (isinstance(result, - monitoring.ServerOpeningEvent) or - isinstance(result, - monitoring.ServerClosedEvent)): - i, passed, message = compare_multiple_events( - i, expected_results, self.all_listener.results) - self.assertTrue(passed, message) - else: - self.assertTrue( - *compare_events(expected_results[i], result)) - i += 1 + def _run_scenario(self): + class NoopMonitor(Monitor): + """Override the _run method to do nothing.""" + + def _run(self): + time.sleep(0.05) + m = MongoClient( + host=scenario_def["uri"], + port=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor, + ) + topology = m._get_topology() + + try: + for phase in scenario_def["phases"]: + for (source, response) in phase.get("responses", []): + source_address = clean_node(source) + topology.on_change( + ServerDescription( + address=source_address, hello=Hello(response), round_trip_time=0 + ) + ) + + expected_results = phase["outcome"]["events"] + expected_len = len(expected_results) + wait_until( + lambda: len(self.all_listener.results) >= expected_len, + "publish all events", + timeout=15, + ) + + # Wait some time to catch possible lagging extra events. + time.sleep(0.5) + + i = 0 + while i < expected_len: + result = ( + self.all_listener.results[i] if len(self.all_listener.results) > i else None + ) + # The order of ServerOpening/ClosedEvents doesn't matter + if isinstance( + result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent) + ): + i, passed, message = compare_multiple_events( + i, expected_results, self.all_listener.results + ) + self.assertTrue(passed, message) + else: + self.assertTrue(*compare_events(expected_results[i], result)) + i += 1 + + # Assert no extra events. + extra_events = self.all_listener.results[expected_len:] + if extra_events: + self.fail(f"Extra events {extra_events!r}") + + self.all_listener.reset() finally: m.close() + return run_scenario @@ -267,16 +249,114 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json.load( - scenario_stream, object_hook=object_hook) + scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s' % (os.path.splitext(filename)[0],) + test_name = f"test_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) create_tests() + +class TestSdamMonitoring(IntegrationTest): + knobs: client_knobs + listener: ServerAndTopologyEventListener + test_client: MongoClient + coll: Collection + + @classmethod + @client_context.require_failCommand_fail_point + def setUpClass(cls): + super().setUpClass() + # Speed up the tests by decreasing the event publish frequency. 
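+ # (A note on the harness rather than driver behavior: client_knobs is a
+ # test-only helper, and events_queue_frequency=0.1 makes the background
+ # publisher flush queued SDAM events roughly every 100ms so that the
+ # wait_until() polling in these tests converges quickly.)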
+ cls.knobs = client_knobs(events_queue_frequency=0.1) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + retry_writes = client_context.supports_transactions() + cls.test_client = rs_or_single_client( + event_listeners=[cls.listener], retryWrites=retry_writes + ) + cls.coll = cls.test_client[cls.client.db.name].test + cls.coll.insert_one({}) + + @classmethod + def tearDownClass(cls): + cls.test_client.close() + cls.knobs.disable() + super().tearDownClass() + + def setUp(self): + self.listener.reset() + + def _test_app_error(self, fail_command_opts, expected_error): + address = self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {"failCommands": ["insert"]} + data.update(fail_command_opts) + fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, + } + with self.fail_point(fail_insert): + if self.test_client.options.retry_writes: + self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + self.coll.insert_one({}) + self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known + ) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown_and_rediscovered(): + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) + + # Topology events are published asynchronously + wait_until(marked_unknown_and_rediscovered, "rediscover node") + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) + + def test_network_error_publishes_events(self): + self._test_app_error({"closeConnection": True}, ConnectionFailure) + + # In 4.4+, not primary errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. + @client_context.require_version_max(4, 3) + def test_not_primary_error_publishes_events(self): + self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + def test_shutdown_error_publishes_events(self): + self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_server.py b/test/test_server.py index d6b92e2fde..1d71a614d3 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -13,23 +13,25 @@ # limitations under the License. 
"""Test the server module.""" +from __future__ import annotations import sys sys.path[0:0] = [""] -from pymongo.ismaster import IsMaster +from test import unittest + +from pymongo.hello import Hello from pymongo.server import Server from pymongo.server_description import ServerDescription -from test import unittest class TestServer(unittest.TestCase): def test_repr(self): - ismaster = IsMaster({'ok': 1}) - sd = ServerDescription(('localhost', 27017), ismaster) - server = Server(sd, pool=object(), monitor=object()) - self.assertTrue('Standalone' in str(server)) + hello = Hello({"ok": 1}) + sd = ServerDescription(("localhost", 27017), hello) + server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] + self.assertTrue("Standalone" in str(server)) if __name__ == "__main__": diff --git a/test/test_server_description.py b/test/test_server_description.py index a79b9875e7..ee05e95cf8 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -13,109 +13,119 @@ # limitations under the License. """Test the server_description module.""" +from __future__ import annotations import sys sys.path[0:0] = [""] -from pymongo.server_type import SERVER_TYPE -from pymongo.ismaster import IsMaster -from pymongo.server_description import ServerDescription from test import unittest -address = ('localhost', 27017) +from bson.int64 import Int64 +from bson.objectid import ObjectId +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription +from pymongo.server_type import SERVER_TYPE +address = ("localhost", 27017) -def parse_ismaster_response(doc): - ismaster_response = IsMaster(doc) - return ServerDescription(address, ismaster_response) + +def parse_hello_response(doc): + hello_response = Hello(doc) + return ServerDescription(address, hello_response) class TestServerDescription(unittest.TestCase): def test_unknown(self): - # Default, no ismaster_response. + # Default, no hello_response. 
s = ServerDescription(address) self.assertEqual(SERVER_TYPE.Unknown, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_mongos(self): - s = parse_ismaster_response({'ok': 1, 'msg': 'isdbgrid'}) + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual(SERVER_TYPE.Mongos, s.server_type) - self.assertEqual('Mongos', s.server_type_name) + self.assertEqual("Mongos", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_primary(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': True, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) - self.assertEqual('RSPrimary', s.server_type_name) + self.assertEqual("RSPrimary", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_secondary(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'secondary': True, 'setName': 'rs'}) + s = parse_hello_response( + {"ok": 1, HelloCompat.LEGACY_CMD: False, "secondary": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual('RSSecondary', s.server_type_name) + self.assertEqual("RSSecondary", s.server_type_name) self.assertFalse(s.is_writable) self.assertTrue(s.is_readable) def test_arbiter(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'arbiterOnly': True, 'setName': 'rs'}) + s = parse_hello_response( + {"ok": 1, HelloCompat.LEGACY_CMD: False, "arbiterOnly": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSArbiter, s.server_type) - self.assertEqual('RSArbiter', s.server_type_name) + self.assertEqual("RSArbiter", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_other(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) - self.assertEqual('RSOther', s.server_type_name) - - s = parse_ismaster_response({ - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'hidden': True, - 'setName': 'rs'}) + self.assertEqual("RSOther", s.server_type_name) + + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hidden": True, + "setName": "rs", + } + ) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_ghost(self): - s = parse_ismaster_response({'ok': 1, 'isreplicaset': True}) + s = parse_hello_response({"ok": 1, "isreplicaset": True}) self.assertEqual(SERVER_TYPE.RSGhost, s.server_type) - self.assertEqual('RSGhost', s.server_type_name) + self.assertEqual("RSGhost", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_fields(self): - s = parse_ismaster_response({ - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'primary': 'a:27017', - 'tags': {'a': 'foo', 'b': 'baz'}, - 'maxMessageSizeBytes': 1, - 'maxBsonObjectSize': 2, - 'maxWriteBatchSize': 3, - 'minWireVersion': 4, - 'maxWireVersion': 5, - 'setName': 'rs'}) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "primary": "a:27017", + "tags": {"a": "foo", "b": "baz"}, + "maxMessageSizeBytes": 1, + "maxBsonObjectSize": 2, + "maxWriteBatchSize": 3, + "minWireVersion": 4, + "maxWireVersion": 5, + "setName": "rs", + } + ) 
self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual(('a', 27017), s.primary) - self.assertEqual({'a': 'foo', 'b': 'baz'}, s.tags) + self.assertEqual(("a", 27017), s.primary) + self.assertEqual({"a": "foo", "b": "baz"}, s.tags) self.assertEqual(1, s.max_message_size) self.assertEqual(2, s.max_bson_size) self.assertEqual(3, s.max_write_batch_size) @@ -123,42 +133,72 @@ def test_fields(self): self.assertEqual(5, s.max_wire_version) def test_default_max_message_size(self): - s = parse_ismaster_response({ - 'ok': 1, - 'ismaster': True, - 'maxBsonObjectSize': 2}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "maxBsonObjectSize": 2}) # Twice max_bson_size. self.assertEqual(4, s.max_message_size) def test_standalone(self): - s = parse_ismaster_response({'ok': 1, 'ismaster': True}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) # Mongod started with --slave. - s = parse_ismaster_response({'ok': 1, 'ismaster': False}) + # master-slave replication was removed in MongoDB 4.0. + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_ok_false(self): - s = parse_ismaster_response({'ok': 0, 'ismaster': True}) + s = parse_hello_response({"ok": 0, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Unknown, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_all_hosts(self): - s = parse_ismaster_response({ - 'ok': 1, - 'ismaster': True, - 'hosts': ['a'], - 'passives': ['b:27018'], - 'arbiters': ['c'] - }) - + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "passives": ["b:27018"], + "arbiters": ["c"], + } + ) + + self.assertEqual([("a", 27017), ("b", 27018), ("c", 27017)], sorted(s.all_hosts)) + + def test_repr(self): + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual( - [('a', 27017), ('b', 27018), ('c', 27017)], - sorted(s.all_hosts)) + repr(s), "<ServerDescription ('localhost', 27017) server_type: Mongos, rtt: None>" + ) + + def test_topology_version(self): + topology_version = {"processId": ObjectId(), "counter": Int64("0")} + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "topologyVersion": topology_version, + } + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) + self.assertEqual(topology_version, s.topology_version) + + # Resetting a server to unknown preserves topology_version. + s_unknown = s.to_unknown() + self.assertEqual(SERVER_TYPE.Unknown, s_unknown.server_type) + self.assertEqual(topology_version, s_unknown.topology_version) + + def test_topology_version_not_present(self): + # No topologyVersion field. + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) + + self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) + self.assertEqual(None, s.topology_version) if __name__ == "__main__": diff --git a/test/test_server_selection.py b/test/test_server_selection.py index fc8f643163..01f19ad87f 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -13,35 +13,45 @@ # limitations under the License.
"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations import os import sys -from pymongo import MongoClient -from pymongo import ReadPreference +from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError +from pymongo.hello import HelloCompat from pymongo.server_selectors import writable_server_selector from pymongo.settings import TopologySettings from pymongo.topology import Topology +from pymongo.typings import strip_optional sys.path[0:0] = [""] -from test import client_context, unittest, IntegrationTest -from test.utils import (rs_or_single_client, wait_until, EventListener, - FunctionCallRecorder) +from test import IntegrationTest, client_context, unittest +from test.utils import ( + EventListener, + FunctionCallRecorder, + rs_or_single_client, + wait_until, +) from test.utils_selection_tests import ( - create_selection_tests, get_addresses, get_topology_settings_dict, - make_server_description) - + create_selection_tests, + get_addresses, + get_topology_settings_dict, + make_server_description, +) # Location of JSON test specifications. _TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), - os.path.join('server_selection', 'server_selection')) + os.path.join("server_selection", "server_selection"), +) -class SelectionStoreSelector(object): +class SelectionStoreSelector: """No-op selector that keeps track of what was passed to it.""" + def __init__(self): self.selection = None @@ -50,8 +60,7 @@ def __call__(self, selection): return selection - -class TestAllScenarios(create_selection_tests(_TEST_PATH)): +class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass @@ -66,37 +75,36 @@ def custom_selector(servers): # Initialize client with appropriate listeners. listener = EventListener() - client = rs_or_single_client( - server_selector=custom_selector, event_listeners=[listener]) + client = rs_or_single_client(server_selector=custom_selector, event_listeners=[listener]) self.addCleanup(client.close) - coll = client.get_database( - 'testdb', read_preference=ReadPreference.NEAREST).coll - self.addCleanup(client.drop_database, 'testdb') + coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll + self.addCleanup(client.drop_database, "testdb") # Wait the node list to be fully populated. def all_hosts_started(): - return (len(client.admin.command('isMaster')['hosts']) == - len(client._topology._description.readable_servers)) + return len(client.admin.command(HelloCompat.LEGACY_CMD)["hosts"]) == len( + client._topology._description.readable_servers + ) - wait_until(all_hosts_started, 'receive heartbeat from all hosts') - expected_port = max([ - n.address[1] - for n in client._topology._description.readable_servers]) + wait_until(all_hosts_started, "receive heartbeat from all hosts") + + expected_port = max( + [strip_optional(n.address[1]) for n in client._topology._description.readable_servers] + ) # Insert 1 record and access it 10 times. - coll.insert_one({'name': 'John Doe'}) + coll.insert_one({"name": "John Doe"}) for _ in range(10): - coll.find_one({'name': 'John Doe'}) + coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. 
- for command in listener.results['started']: - if command.command_name == 'find': - self.assertEqual( - command.connection_id[1], expected_port) + for command in listener.started_events: + if command.command_name == "find": + self.assertEqual(command.connection_id[1], expected_port) def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, 'string', {}]: + for selector_candidate in [[], 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) @@ -110,101 +118,82 @@ def test_selector_called(self): # Client setup. mongo_client = rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection - self.addCleanup(mongo_client.drop_database, 'testdb') self.addCleanup(mongo_client.close) + self.addCleanup(mongo_client.drop_database, "testdb") # Do N operations and test selector is called at least N times. - test_collection.insert_one({'age': 20, 'name': 'John'}) - test_collection.insert_one({'age': 31, 'name': 'Jane'}) - test_collection.update_one({'name': 'Jane'}, {'$set': {'age': 21}}) - test_collection.find_one({'name': 'Roe'}) + test_collection.insert_one({"age": 20, "name": "John"}) + test_collection.insert_one({"age": 31, "name": "Jane"}) + test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) + test_collection.find_one({"name": "Roe"}) self.assertGreaterEqual(selector.call_count, 4) @client_context.require_replica_set def test_latency_threshold_application(self): selector = SelectionStoreSelector() - scenario_def = { - 'topology_description': { - 'type': 'ReplicaSetWithPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSPrimary', - 'tag': {}}, - ]}} + scenario_def: dict = { + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}}, + ], + } + } # Create & populate Topology such that all but one server is too slow. - rtt_times = [srv['avg_rtt_ms'] for srv in - scenario_def['topology_description']['servers']] + rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]] min_rtt_idx = rtt_times.index(min(rtt_times)) - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Invoke server selection and assert no filtering based on latency # prior to custom server selection logic kicking in. 
server = topology.select_server(ReadPreference.NEAREST) - self.assertEqual( - len(selector.selection), - len(topology.description.server_descriptions())) + assert selector.selection is not None + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) # Ensure proper filtering based on latency after custom selection. - self.assertEqual( - server.description.address, seeds[min_rtt_idx]) + self.assertEqual(server.description.address, seeds[min_rtt_idx]) @client_context.require_replica_set def test_server_selector_bypassed(self): selector = FunctionCallRecorder(lambda x: x) scenario_def = { - 'topology_description': { - 'type': 'ReplicaSetNoPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSSecondary', - 'tag': {}}, - ]}} + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } # Create & populate Topology such that no server is writeable. - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Invoke server selection and assert no calls to our custom selector. - with self.assertRaisesRegex( - ServerSelectionTimeoutError, 'No primary available for writes'): - topology.select_server( - writable_server_selector, server_selection_timeout=0.1) + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + topology.select_server(writable_server_selector, server_selection_timeout=0.1) self.assertEqual(selector.call_count, 0) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py new file mode 100644 index 0000000000..52873882f0 --- /dev/null +++ b/test/test_server_selection_in_window.py @@ -0,0 +1,169 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
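
The two scenarios above drive Topology directly, but server_selector is also a public MongoClient option: the callable receives the list of candidate ServerDescriptions and must return the (possibly filtered) list to select from. A hypothetical sketch, with a made-up selector name:

    from pymongo import MongoClient

    def prefer_port_27018(server_descriptions):
        # Keep servers listening on 27018, but fall back to the full list
        # so selection can still succeed when none match.
        preferred = [s for s in server_descriptions if s.address[1] == 27018]
        return preferred or server_descriptions

    # server_selector must be callable, as test_invalid_server_selector asserts.
    client = MongoClient(connect=False, server_selector=prefer_port_27018)
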
+ +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import os +import threading +from test import IntegrationTest, client_context, unittest +from test.utils import ( + OvertCommandListener, + SpecTestCreator, + get_pool, + rs_client, + wait_until, +) +from test.utils_selection_tests import create_topology + +from pymongo.common import clean_node +from pymongo.read_preferences import ReadPreference + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), os.path.join("server_selection", "in_window") +) + + +class TestAllScenarios(unittest.TestCase): + def run_scenario(self, scenario_def): + topology = create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock["operation_count"] + + pref = ReadPreference.NEAREST + counts = {address: 0 for address in topology._description.server_descriptions()} + + # Number of times to repeat server selection + iterations = scenario_def["iterations"] + for _ in range(iterations): + server = topology.select_server(pref, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address]) / iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def) + + return run_scenario + + +class CustomSpecTestCreator(SpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. 
+ """ + return [scenario_def] + + +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderThread(threading.Thread): + def __init__(self, collection, iterations): + super().__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + def run(self): + for _ in range(self.iterations): + self.collection.find_one({}) + self.passed = True + + +class TestProse(IntegrationTest): + def frequencies(self, client, listener, n_finds=10): + coll = client.test.test + N_THREADS = 10 + threads = [FinderThread(coll, n_finds) for _ in range(N_THREADS)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_THREADS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0.0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address] / float(len(events)) + return freqs + + @client_context.require_failCommand_appName + @client_context.require_multiple_mongoses + def test_load_balancing(self): + listener = OvertCommandListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. + client = rs_client( + client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener], + localThresholdMS=30000, + minPoolSize=10, + ) + self.addCleanup(client.close) + wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + wait_until(lambda: len(get_pool(client).conns) >= 10, "create 10 connections") + # Delay find commands on + delay_finds = { + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", + }, + } + with self.fail_point(delay_finds): + nodes = client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = self.frequencies(client, listener, n_finds=100) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index f914e03030..a129af4585 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -13,6 +13,7 @@ # limitations under the License. """Test the topology module.""" +from __future__ import annotations import json import os @@ -21,11 +22,11 @@ sys.path[0:0] = [""] from test import unittest + from pymongo.read_preferences import MovingAverage # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'server_selection/rtt') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") class TestAllScenarios(unittest.TestCase): @@ -36,14 +37,13 @@ def create_test(scenario_def): def run_scenario(self): moving_average = MovingAverage() - if scenario_def['avg_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['avg_rtt_ms']) + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) - if scenario_def['new_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['new_rtt_ms']) + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) - self.assertAlmostEqual(moving_average.get(), - scenario_def['new_avg_rtt']) + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) return run_scenario @@ -58,8 +58,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index 02352022ee..c95691be15 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -13,47 +13,55 @@ # limitations under the License. """Test the client_session module.""" +from __future__ import annotations import copy -import os import sys +import time +from io import BytesIO +from typing import Any, Callable, List, Set, Tuple + +from pymongo.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + EventListener, + ExceptionCatchingThread, + rs_or_single_client, + wait_until, +) from bson import DBRef -from bson.py3compat import StringIO from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, InsertOne, IndexModel, OFF, monitoring +from pymongo import ASCENDING, IndexModel, InsertOne, monitoring +from pymongo.command_cursor import CommandCursor from pymongo.common import _MAX_END_SESSIONS -from pymongo.errors import (ConfigurationError, - InvalidOperation, - OperationFailure) -from pymongo.monotonic import time as _time +from pymongo.cursor import Cursor +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import UpdateOne from pymongo.read_concern import ReadConcern -from test import IntegrationTest, client_context, db_user, db_pwd, unittest, SkipTest -from test.utils import (ignore_deprecations, - rs_or_single_client, - EventListener, - TestCreator) -from test.utils_spec_runner import SpecRunner + # Ignore auth commands like saslStart, so we can assert lsid is in all commands. 
class SessionTestListener(EventListener): def started(self, event): - if not event.command_name.startswith('sasl'): - super(SessionTestListener, self).started(event) + if not event.command_name.startswith("sasl"): + super().started(event) def succeeded(self, event): - if not event.command_name.startswith('sasl'): - super(SessionTestListener, self).succeeded(event) + if not event.command_name.startswith("sasl"): + super().succeeded(event) def failed(self, event): - if not event.command_name.startswith('sasl'): - super(SessionTestListener, self).failed(event) + if not event.command_name.startswith("sasl"): + super().failed(event) def first_command_started(self): - assert len(self.results['started']) >= 1, ( - "No command-started events") + assert len(self.started_events) >= 1, "No command-started events" - return self.results['started'][0] + return self.started_events[0] def session_ids(client): @@ -61,11 +69,13 @@ def session_ids(client): class TestSession(IntegrationTest): + client2: MongoClient + sensitive_commands: Set[str] @classmethod @client_context.require_sessions def setUpClass(cls): - super(TestSession, cls).setUpClass() + super().setUpClass() # Create a second client so we can make sure clients cannot share # sessions. cls.client2 = rs_or_single_client() @@ -77,54 +87,57 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) - super(TestSession, cls).tearDownClass() + cls.client2.close() + super().tearDownClass() def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() self.client = rs_or_single_client( - event_listeners=[self.listener, self.session_checker_listener]) + event_listeners=[self.listener, self.session_checker_listener] + ) + self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s['id'] for s in session_ids(self.client)) + self.initial_lsids = {s["id"] for s in session_ids(self.client)} def tearDown(self): """All sessions used in the test must be returned to the pool.""" - self.client.drop_database('pymongo_test') + self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results['started']: - if 'lsid' in event.command: - used_lsids.add(event.command['lsid']['id']) + for event in self.session_checker_listener.started_events: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s['id'] for s in session_ids(self.client)) + current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): - listener = client.event_listeners()[0][0] + listener = client.options.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: last_use = s._server_session.last_use - start = _time() + start = time.monotonic() self.assertLessEqual(last_use, start) - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. 
args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s f(*args, **kw) self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - f.__name__, event.command_name)) + event.command["lsid"], + f"{f.__name__} sent wrong lsid with {event.command_name}", + ) self.assertFalse(s.has_ended) @@ -137,35 +150,90 @@ def _test_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, - 'Can only use session with the MongoClient' - ' that started it'): + InvalidOperation, "Can only use session with the MongoClient that started it" + ): f(*args, **kw) # No explicit session. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] - for event in listener.results['started']: + for event in listener.started_events: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) - lsids.append(event.command['lsid']) + lsids.append(event.command["lsid"]) - if not (sys.platform.startswith('java') or 'PyPy' in sys.version): + if not (sys.platform.startswith("java") or "PyPy" in sys.version): # Server session was returned to pool. Ignore interpreters with # non-deterministic GC. for lsid in lsids: self.assertIn( - lsid, session_ids(client), - "%s did not return implicit session to pool" % ( - f.__name__,)) + lsid, + session_ids(client), + f"{f.__name__} did not return implicit session to pool", + ) + + def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. 
+ succeeded = False + lsid_set = set() + failures = 0 + for _ in range(5): + listener = EventListener() + client = rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + cursor = client.db.test.find({}) + ops: List[Tuple[Callable, List[Any]]] = [ + (client.db.test.find_one, [{"_id": 1}]), + (client.db.test.delete_one, [{}]), + (client.db.test.update_one, [{}, {"$set": {"x": 2}}]), + (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]), + (client.db.test.find_one_and_delete, [{}]), + (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]), + (client.db.test.find_one_and_replace, [{}, {}]), + (client.db.test.aggregate, [[{"$limit": 1}]]), + (client.db.test.find, []), + (client.server_info, []), + (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), + (cursor.distinct, ["_id"]), + (client.db.list_collections, []), + ] + threads = [] + listener.reset() + + def thread_target(op, *args): + res = op(*args) + if isinstance(res, (Cursor, CommandCursor)): + list(res) + + for op, args in ops: + threads.append( + ExceptionCatchingThread( + target=thread_target, args=[op, *args], name=op.__name__ + ) + ) + threads[-1].start() + self.assertEqual(len(threads), len(ops)) + for thread in threads: + thread.join() + self.assertIsNone(thread.exc) + client.close() + lsid_set.clear() + for i in listener.started_events: + if i.command.get("lsid"): + lsid_set.add(i.command.get("lsid")["id"]) + if len(lsid_set) == 1: + succeeded = True + else: + failures += 1 + self.assertTrue(succeeded, lsid_set) def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. @@ -205,85 +273,45 @@ def test_end_sessions(self): listener = SessionTestListener() client = rs_or_single_client(event_listeners=[listener]) # Start many sessions. - sessions = [client.start_session() - for _ in range(_MAX_END_SESSIONS + 1)] + sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] for s in sessions: s.end_session() # Closing the client should end all sessions and clear the pool. - self.assertEqual(len(client._topology._session_pool), - _MAX_END_SESSIONS + 1) + self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results['started'] - if e.command_name == 'endSessions'] + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. - listener.results.clear() + listener.reset() client.close() - self.assertEqual(len(listener.results['started']), 0) + self.assertEqual(len(listener.started_events), 0) def test_client(self): client = self.client - - # Make sure if the test fails we unlock the server. 
- def unlock(): - try: - client.unlock() - except OperationFailure: - pass - - self.addCleanup(unlock) - - ops = [ + ops: list = [ (client.server_info, [], {}), - (client.database_names, [], {}), - (client.drop_database, ['pymongo_test'], {}), + (client.list_database_names, [], {}), + (client.drop_database, ["pymongo_test"], {}), ] - if not client_context.is_mongos: - ops.extend([ - (client.fsync, [], {'lock': True}), - (client.unlock, [], {}), - ]) - self._test_ops(client, *ops) def test_database(self): client = self.client db = client.pymongo_test - ops = [ - (db.command, ['ping'], {}), - (db.create_collection, ['collection'], {}), - (db.collection_names, [], {}), + ops: list = [ + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), (db.list_collection_names, [], {}), - (db.validate_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), - (db.current_op, [], {}), - (db.profiling_info, [], {}), - (db.dereference, [DBRef('collection', 1)], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), ] - - if not client_context.is_mongos: - ops.append((db.set_profiling_level, [OFF], {})) - ops.append((db.profiling_level, [], {})) - self._test_ops(client, *ops) - @client_context.require_auth - @ignore_deprecations - def test_user_admin(self): - client = self.client - db = client.pymongo_test - - self._test_ops( - client, - (db.add_user, ['session-test', 'pass'], {'roles': ['read']}), - # Do it again to test updateUser command. - (db.add_user, ['session-test', 'pass'], {'roles': ['read']}), - (db.remove_user, ['session-test'], {})) - @staticmethod def collection_write_ops(coll): """Generate database write ops for tests.""" @@ -293,21 +321,19 @@ def collection_write_ops(coll): (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_one, [{}], {}), (coll.delete_many, [{}], {}), - (coll.map_reduce, - ['function() {}', 'function() {}', 'output'], {}), (coll.find_one_and_replace, [{}, {}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. - (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] @@ -318,57 +344,20 @@ def test_collection(self): # Test some collection methods - the rest are in test_cursor. 
ops = self.collection_write_ops(coll) - ops.extend([ - (coll.distinct, ['a'], {}), - (coll.find_one, [], {}), - (coll.count, [], {}), - (coll.count_documents, [{}], {}), - (coll.inline_map_reduce, ['function() {}', 'function() {}'], {}), - (coll.list_indexes, [], {}), - (coll.index_information, [], {}), - (coll.options, [], {}), - (coll.aggregate, [[]], {}), - ]) - - if client_context.supports_reindex: - ops.append((coll.reindex, [], {})) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) self._test_ops(client, *ops) - @client_context.require_no_mongos - @client_context.require_version_max(4, 1, 0) - @ignore_deprecations - def test_parallel_collection_scan(self): - listener = self.listener - client = self.client - coll = client.pymongo_test.collection - coll.insert_many([{'_id': i} for i in range(1000)]) - - listener.results.clear() - - def scan(session=None): - cursors = coll.parallel_scan(4, session=session) - for c in cursors: - c.batch_size(2) - list(c) - - listener.results.clear() - with client.start_session() as session: - scan(session) - cursor_lsids = {} - for event in listener.results['started']: - self.assertIn( - 'lsid', event.command, - "parallel_scan sent no lsid with %s" % (event.command_name, )) - - if event.command_name == 'getMore': - cursor_id = event.command['getMore'] - if cursor_id in cursor_lsids: - self.assertEqual(cursor_lsids[cursor_id], - event.command['lsid']) - else: - cursor_lsids[cursor_id] = event.command['lsid'] - def test_cursor_clone(self): coll = self.client.pymongo_test.collection # Ensure some batches. @@ -403,101 +392,96 @@ def test_cursor(self): # Test all cursor methods. ops = [ - ('find', lambda session: list(coll.find(session=session))), - ('getitem', lambda session: coll.find(session=session)[0]), - ('count', lambda session: coll.find(session=session).count()), - ('distinct', - lambda session: coll.find(session=session).distinct('a')), - ('explain', lambda session: coll.find(session=session).explain()), + ("find", lambda session: list(coll.find(session=session))), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), ] for name, f in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() f(session=s) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, + f"{name} sent no lsid with {event.command_name}", + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) with self.assertRaisesRegex(InvalidOperation, "ended session"): f(session=s) # No explicit session. 
for name, f in ops: - listener.results.clear() + listener.reset() f(session=None) event0 = listener.first_command_started() self.assertTrue( - 'lsid' in event0.command, - "%s sent no lsid with %s" % ( - name, event0.command_name)) + "lsid" in event0.command, f"{name} sent no lsid with {event0.command_name}" + ) - lsid = event0.command['lsid'] + lsid = event0.command["lsid"] - for event in listener.results['started'][1:]: + for event in listener.started_events[1:]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, f"{name} sent no lsid with {event.command_name}" + ) self.assertEqual( lsid, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) def test_gridfs(self): client = self.client fs = GridFS(client.pymongo_test) def new_file(session=None): - grid_file = fs.new_file(_id=1, filename='f', session=session) + grid_file = fs.new_file(_id=1, filename="f", session=session) # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. - grid_file.write(b'a' * 1048576) + grid_file.write(b"a" * 1048576) grid_file.close() def find(session=None): - files = list(fs.find({'_id': 1}, session=session)) + files = list(fs.find({"_id": 1}, session=session)) for f in files: f.read() self._test_ops( client, (new_file, [], {}), - (fs.put, [b'data'], {}), + (fs.put, [b"data"], {}), (lambda session=None: fs.get(1, session=session).read(), [], {}), - (lambda session=None: fs.get_version('f', session=session).read(), - [], {}), - (lambda session=None: - fs.get_last_version('f', session=session).read(), [], {}), + (lambda session=None: fs.get_version("f", session=session).read(), [], {}), + (lambda session=None: fs.get_last_version("f", session=session).read(), [], {}), (fs.list, [], {}), (fs.find_one, [1], {}), (lambda session=None: list(fs.find(session=session)), [], {}), (fs.exists, [1], {}), (find, [], {}), - (fs.delete, [1], {})) + (fs.delete, [1], {}), + ) def test_gridfs_bucket(self): client = self.client bucket = GridFSBucket(client.pymongo_test) def upload(session=None): - stream = bucket.open_upload_stream('f', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream("f", session=session) + stream.write(b"a" * 1048576) stream.close() def upload_with_id(session=None): - stream = bucket.open_upload_stream_with_id(1, 'f1', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + stream.write(b"a" * 1048576) stream.close() def open_download_stream(session=None): @@ -505,31 +489,32 @@ def open_download_stream(session=None): stream.read() def open_download_stream_by_name(session=None): - stream = bucket.open_download_stream_by_name('f', session=session) + stream = bucket.open_download_stream_by_name("f", session=session) stream.read() def find(session=None): - files = list(bucket.find({'_id': 1}, session=session)) + files = list(bucket.find({"_id": 1}, session=session)) for f in files: f.read() - sio = StringIO() + sio = BytesIO() self._test_ops( client, (upload, [], {}), (upload_with_id, [], {}), - (bucket.upload_from_stream, ['f', b'data'], {}), - (bucket.upload_from_stream_with_id, [2, 'f', b'data'], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), (open_download_stream, [], {}), (open_download_stream_by_name, [], {}), 
(bucket.download_to_stream, [1, sio], {}), - (bucket.download_to_stream_by_name, ['f', sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), - (bucket.rename, [1, 'f2'], {}), + (bucket.rename, [1, "f2"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {})) + (bucket.delete, [2], {}), + ) def test_gridfsbucket_cursor(self): client = self.client @@ -537,7 +522,7 @@ def test_gridfsbucket_cursor(self): for file_id in 1, 2: stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) - stream.write(b'a' * 1048576) + stream.write(b"a" * 1048576) stream.close() with client.start_session() as s: @@ -567,6 +552,7 @@ def test_gridfsbucket_cursor(self): # Explicit session. with client.start_session() as s: cursor = bucket.find(session=s) + assert cursor.session is not None s = cursor.session files = list(cursor) cursor.__del__() @@ -586,10 +572,7 @@ def test_aggregate(self): coll = client.pymongo_test.collection def agg(session=None): - list(coll.aggregate( - [], - batchSize=2, - session=session)) + list(coll.aggregate([], batchSize=2, session=session)) # With empty collection. self._test_ops(client, (agg, [], {})) @@ -618,14 +601,14 @@ def test_aggregate_error(self): # 3.6.0 mongos only validates the aggregate pipeline when the # database exists. coll.insert_one({}) - listener.results.clear() + listener.reset() with self.assertRaises(OperationFailure): - coll.aggregate([{'$badOperation': {'bar': 1}}]) + coll.aggregate([{"$badOperation": {"bar": 1}}]) event = listener.first_command_started() - self.assertEqual(event.command_name, 'aggregate') - lsid = event.command['lsid'] + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] # Session was returned to pool despite error. self.assertIn(lsid, session_ids(client)) @@ -636,7 +619,7 @@ def _test_cursor_helper(self, create_cursor, close_cursor): cursor = create_cursor(coll, None) next(cursor) # Session is "owned" by cursor. 
- session = getattr(cursor, '_%s__session' % cursor.__class__.__name__) + session = getattr(cursor, "_%s__session" % cursor.__class__.__name__) self.assertIsNotNone(session) lsid = session.session_id next(cursor) @@ -659,154 +642,192 @@ def _test_cursor_helper(self, create_cursor, close_cursor): def test_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.close() + ) def test_command_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: cursor.close() + ) def test_cursor_del(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.__del__()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.__del__() + ) def test_command_cursor_del(self): self._test_cursor_helper( lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.__del__()) + lambda cursor: cursor.__del__(), + ) def test_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(session=session), lambda cursor: list(cursor) + ) def test_command_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: list(cursor) + ) def test_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.find(limit=4, batch_size=2, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(limit=4, batch_size=2, session=session), + lambda cursor: list(cursor), + ) def test_command_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], batchSize=900, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], batchSize=900, session=session), + lambda cursor: list(cursor), + ) def _test_unacknowledged_ops(self, client, *ops): - listener = client.event_listeners()[0][0] + listener = client.options.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaises( - ConfigurationError, - msg="%s did not raise ConfigurationError" % ( - f.__name__,)): + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" + ): f(*args, **kw) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) # Should not run any command before raising an error. 
- self.assertFalse(listener.results['started'], - "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, f"{f.__name__} sent command") self.assertTrue(s.has_ended) # Unacknowledged write without a session does not send an lsid. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.started_events), 1) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) - - for event in listener.results['started']: - self.assertNotIn('lsid', event.command, - "%s sent lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + for event in listener.started_events: + self.assertNotIn( + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" + ) def test_unacknowledged_writes(self): # Ensure the collection exists. self.client.pymongo_test.test_unacked_writes.insert_one({}) client = rs_or_single_client(w=0, event_listeners=[self.listener]) + self.addCleanup(client.close) db = client.pymongo_test coll = db.test_unacked_writes - ops = [ + ops: list = [ (client.drop_database, [db.name], {}), - (db.create_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), ] ops.extend(self.collection_write_ops(coll)) self._test_unacknowledged_ops(client, *ops) + def drop_db(): + try: + self.client.drop_database(db.name) + return True + except OperationFailure as exc: + # Try again on BackgroundOperationInProgressForDatabase and + # BackgroundOperationInProgressForNamespace. 
+ if exc.code in (12586, 12587): + return False + raise + + wait_until(drop_db, "dropped database after w=0 writes") + + def test_snapshot_incompatible_with_causal_consistency(self): + with self.client.start_session(causal_consistency=False, snapshot=False): + pass + with self.client.start_session(causal_consistency=False, snapshot=True): + pass + with self.client.start_session(causal_consistency=True, snapshot=False): + pass + with self.assertRaises(ConfigurationError): + with self.client.start_session(causal_consistency=True, snapshot=True): + pass + + def test_session_not_copyable(self): + client = self.client + with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) + class TestCausalConsistency(unittest.TestCase): + listener: SessionTestListener + client: MongoClient @classmethod def setUpClass(cls): cls.listener = SessionTestListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) + @classmethod + def tearDownClass(cls): + cls.client.close() + @client_context.require_sessions def setUp(self): - super(TestCausalConsistency, self).setUp() + super().setUp() @client_context.require_no_standalone def test_core(self): with self.client.start_session() as sess: self.assertIsNone(sess.cluster_time) self.assertIsNone(sess.operation_time) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results['started'][0] + started = self.listener.started_events[0] cmd = started.command - self.assertIsNone(cmd.get('readConcern')) + self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results['succeeded'][0] + succeeded = self.listener.succeeded_events[0] reply = succeeded.reply - self.assertEqual(op_time, reply.get('operationTime')) + self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) - self.listener.results.clear() + self.listener.reset() try: - self.client.pymongo_test.command('doesntexist', session=sess) + self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results['failed'][0] - failed_op_time = failed.failure.get('operationTime') + failed = self.listener.failed_events[0] + failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. Make sure we don't # change operation_time to None. 
             if failed_op_time is None:
                 self.assertIsNotNone(sess.operation_time)
             else:
-                self.assertEqual(
-                    sess.operation_time, failed_op_time)
+                self.assertEqual(sess.operation_time, failed_op_time)

             with self.client.start_session() as sess2:
                 self.assertIsNone(sess2.cluster_time)
@@ -815,6 +836,8 @@ def test_core(self):
             self.assertRaises(ValueError, sess2.advance_cluster_time, {})
             self.assertRaises(TypeError, sess2.advance_operation_time, 1)
             # No error
+            assert sess.cluster_time is not None
+            assert sess.operation_time is not None
             sess2.advance_cluster_time(sess.cluster_time)
             sess2.advance_operation_time(sess.operation_time)
             self.assertEqual(sess.cluster_time, sess2.cluster_time)
@@ -826,72 +849,38 @@ def _test_reads(self, op, exception=None):
         coll = self.client.pymongo_test.test
         with self.client.start_session() as sess:
             coll.find_one({}, session=sess)
             operation_time = sess.operation_time
             self.assertIsNotNone(operation_time)
-            self.listener.results.clear()
+            self.listener.reset()
             if exception:
                 with self.assertRaises(exception):
                     op(coll, sess)
             else:
                 op(coll, sess)
-                act = self.listener.results['started'][0].command.get(
-                    'readConcern', {}).get('afterClusterTime')
+                act = (
+                    self.listener.started_events[0]
+                    .command.get("readConcern", {})
+                    .get("afterClusterTime")
+                )
                 self.assertEqual(operation_time, act)

     @client_context.require_no_standalone
     def test_reads(self):
         # Make sure the collection exists.
         self.client.pymongo_test.test.insert_one({})
+        self._test_reads(lambda coll, session: list(coll.aggregate([], session=session)))
+        self._test_reads(lambda coll, session: list(coll.find({}, session=session)))
+        self._test_reads(lambda coll, session: coll.find_one({}, session=session))
+        self._test_reads(lambda coll, session: coll.count_documents({}, session=session))
+        self._test_reads(lambda coll, session: coll.distinct("foo", session=session))
         self._test_reads(
-            lambda coll, session: list(coll.aggregate([], session=session)))
-        self._test_reads(
-            lambda coll, session: list(coll.find({}, session=session)))
-        self._test_reads(
-            lambda coll, session: coll.find_one({}, session=session))
-        self._test_reads(
-            lambda coll, session: coll.count(session=session))
-        self._test_reads(
-            lambda coll, session: coll.count_documents({}, session=session))
-        self._test_reads(
-            lambda coll, session: coll.distinct('foo', session=session))
-
-        # SERVER-40938 removed support for causally consistent mapReduce.
-        map_reduce_exc = None
-        if client_context.version.at_least(4, 1, 12):
-            map_reduce_exc = OperationFailure
-        # SERVER-44635 The mapReduce in aggregation project added back
-        # support for causally consistent mapReduce.
- if client_context.version < (4, 3): - self._test_reads( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'inline', session=session), - exception=map_reduce_exc) - self._test_reads( - lambda coll, session: coll.inline_map_reduce( - 'function() {}', 'function() {}', session=session), - exception=map_reduce_exc) - if (not client_context.is_mongos and - not client_context.version.at_least(4, 1, 0)): - def scan(coll, session): - cursors = coll.parallel_scan(1, session=session) - for cur in cursors: - list(cur) - self._test_reads( - lambda coll, session: scan(coll, session=session)) + lambda coll, session: list(coll.aggregate_raw_batches([], session=session)) + ) + self._test_reads(lambda coll, session: list(coll.find_raw_batches({}, session=session))) self.assertRaises( ConfigurationError, self._test_reads, - lambda coll, session: list( - coll.aggregate_raw_batches([], session=session))) - self.assertRaises( - ConfigurationError, - self._test_reads, - lambda coll, session: list( - coll.find_raw_batches({}, session=session))) - self.assertRaises( - ConfigurationError, - self._test_reads, - lambda coll, session: coll.estimated_document_count( - session=session)) + lambda coll, session: coll.estimated_document_count(session=session), + ) def _test_writes(self, op): coll = self.client.pymongo_test.test @@ -899,56 +888,50 @@ def _test_writes(self, op): op(coll, sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=sess) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertEqual(operation_time, act) @client_context.require_no_standalone def test_writes(self): self._test_writes( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_writes( - lambda coll, session: coll.insert_one({}, session=session)) - self._test_writes( - lambda coll, session: coll.insert_many([{}], session=session)) - self._test_writes( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) - self._test_writes( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( - lambda coll, session: coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_writes( - lambda coll, session: coll.delete_one({}, session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_writes( - lambda coll, session: coll.delete_many({}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) self._test_writes( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_writes( lambda coll, session: 
coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) - self._test_writes( - lambda coll, session: coll.create_index("foo", session=session)) + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + self._test_writes(lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session)) + self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) self._test_writes( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_writes( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_writes( - lambda coll, session: coll.drop_indexes(session=session)) - - if client_context.supports_reindex: - self._test_writes( - lambda coll, session: coll.reindex(session=session)) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) def _test_no_read_concern(self, op): coll = self.client.pymongo_test.test @@ -956,73 +939,58 @@ def _test_no_read_concern(self, op): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() op(coll, sess) - rc = self.listener.results['started'][0].command.get( - 'readConcern') + rc = self.listener.started_events[0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.insert_one({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.insert_many([{}], session=session)) + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) self._test_no_read_concern( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.delete_many({}, session=session)) self._test_no_read_concern( - lambda coll, session: coll.delete_one({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.delete_many({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_no_read_concern( lambda coll, session: coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) + {"y": 1}, 
{"$set": {"x": 1}}, session=session + ) + ) self._test_no_read_concern( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.create_index("foo", session=session)) + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.create_index("foo", session=session)) self._test_no_read_concern( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_indexes(session=session)) - self._test_no_read_concern( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'mrout', session=session)) - - # They are not writes, but currentOp and explain also don't support - # readConcern. - self._test_no_read_concern( - lambda coll, session: coll.database.current_op(session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find({}, session=session).explain()) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_no_read_concern(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) - if client_context.supports_reindex: - self._test_no_read_concern( - lambda coll, session: coll.reindex(session=session)) + # Not a write, but explain also doesn't support readConcern. + self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) def test_aggregate_out_does_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: list( - coll.aggregate([{"$out": "aggout"}], session=session))) + lambda coll, session: list(coll.aggregate([{"$out": "aggout"}], session=session)) + ) @client_context.require_no_standalone def test_get_more_does_not_include_read_concern(self): @@ -1034,29 +1002,35 @@ def test_get_more_does_not_include_read_concern(self): coll.insert_many([{}, {}]) cursor = coll.find({}).batch_size(1) next(cursor) - self.listener.results.clear() + self.listener.reset() list(cursor) - started = self.listener.results['started'][0] - self.assertEqual(started.command_name, 'getMore') - self.assertIsNone(started.command.get('readConcern')) + started = self.listener.started_events[0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_standalone def test_server_not_causal(self): with self.client.start_session(causal_consistency=True) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( 
+ self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_no_standalone @@ -1065,186 +1039,59 @@ def test_read_concern(self): with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test coll.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertIsNone(read_concern.get('level')) - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertEqual(read_concern.get('level'), 'majority') - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) -class TestSessionsMultiAuth(IntegrationTest): - @client_context.require_auth - @client_context.require_sessions - def setUp(self): - super(TestSessionsMultiAuth, self).setUp() - - client_context.create_user( - 'pymongo_test', 'second-user', 'pass', roles=['readWrite']) - self.addCleanup(client_context.drop_user, 'pymongo_test','second-user') - - @ignore_deprecations - def test_session_authenticate_multiple(self): - listener = SessionTestListener() - # Logged in as root. - client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - db.authenticate('second-user', 'pass') - - with self.assertRaises(InvalidOperation): - client.start_session() - - # No implicit sessions. - listener.results.clear() - db.collection.find_one() - event = listener.first_command_started() - self.assertNotIn( - 'lsid', event.command, - "find_one with multi-auth shouldn't have sent lsid with %s" % ( - event.command_name)) - - @ignore_deprecations - def test_explicit_session_logout(self): - listener = SessionTestListener() - - # Changing auth invalidates the session. Start as root. 
- client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - db.collection.insert_many([{} for _ in range(10)]) - self.addCleanup(db.collection.drop) - - with client.start_session() as s: - listener.results.clear() - cursor = db.collection.find(session=s).batch_size(2) - next(cursor) - event = listener.first_command_started() - self.assertEqual(event.command_name, 'find') - self.assertEqual( - s.session_id, event.command.get('lsid'), - "find() sent wrong lsid with %s cmd" % (event.command_name,)) - - client.admin.logout() - db.authenticate('second-user', 'pass') - - err = ('Cannot use session after authenticating with different' - ' credentials') - - with self.assertRaisesRegex(InvalidOperation, err): - # Auth has changed between find and getMore. - list(cursor) - - with self.assertRaisesRegex(InvalidOperation, err): - db.collection.bulk_write([InsertOne({})], session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - db.collection_names(session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - db.collection.find_one(session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - list(db.collection.aggregate([], session=s)) - - @ignore_deprecations - def test_implicit_session_logout(self): - listener = SessionTestListener() - - # Changing auth doesn't invalidate the session. Start as root. - client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - - for name, f in [ - ('bulk_write', lambda: db.collection.bulk_write([InsertOne({})])), - ('collection_names', db.collection_names), - ('find_one', db.collection.find_one), - ('aggregate', lambda: list(db.collection.aggregate([]))) - ]: - def sub_test(): - listener.results.clear() - f() - for event in listener.results['started']: - self.assertIn( - 'lsid', event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) - - # We switch auth without clearing the pool of session ids. The - # server considers these to be new sessions since it's a new user. - # The old sessions time out on the server after 30 minutes. - client.admin.logout() - db.authenticate('second-user', 'pass') - sub_test() - db.logout() - client.admin.authenticate(db_user, db_pwd) - sub_test() - - -class TestSessionsNotSupported(IntegrationTest): - @client_context.require_version_max(3, 5, 10) - def test_sessions_not_supported(self): - with self.assertRaisesRegex( - ConfigurationError, "Sessions are not supported"): - self.client.start_session() - - class TestClusterTime(IntegrationTest): def setUp(self): - super(TestClusterTime, self).setUp() - if '$clusterTime' not in client_context.ismaster: - raise SkipTest('$clusterTime not supported') + super().setUp() + if "$clusterTime" not in client_context.hello: + raise SkipTest("$clusterTime not supported") - @ignore_deprecations def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. - client = rs_or_single_client(event_listeners=[listener], - heartbeatFrequencyMS=999999) + client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) + self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). 
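For context on the $clusterTime assertions that follow: every command on a sessions-capable deployment carries $clusterTime, and any command listener can observe it. A rough sketch; the listener name is illustrative and not part of this suite:

from pymongo import MongoClient, monitoring

class ClusterTimeLogger(monitoring.CommandListener):
    # Print the $clusterTime gossiped with each outgoing command.
    def started(self, event):
        print(event.command_name, event.command.get("$clusterTime"))

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass

client = MongoClient(event_listeners=[ClusterTimeLogger()])
client.pymongo_test.test.insert_one({})  # carries the latest known cluster time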
collection.insert_many([{} for _ in range(10)]) self.addCleanup(collection.drop) self.addCleanup(client.pymongo_test.collection2.drop) - def bulk_insert(ordered): - if ordered: - bulk = collection.initialize_ordered_bulk_op() - else: - bulk = collection.initialize_unordered_bulk_op() - bulk.insert({}) - bulk.execute() - def rename_and_drop(): # Ensure collection exists. collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_and_find(): @@ -1267,100 +1114,49 @@ def insert_and_aggregate(): ops = [ # Tests from Driver Sessions Spec. - ('ping', lambda: client.admin.command('ping')), - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - + ("ping", lambda: client.admin.command("ping")), + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), # Additional PyMongo tests. - ('insert_and_find', insert_and_find), - ('insert_and_aggregate', insert_and_aggregate), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('ordered bulk', lambda: bulk_insert(True)), - ('unordered bulk', lambda: bulk_insert(False)), - ('rename_and_drop', rename_and_drop), + ("insert_and_find", insert_and_find), + ("insert_and_aggregate", insert_and_aggregate), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), ] - for name, f in ops: - listener.results.clear() + for _name, f in ops: + listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. f() f() collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): self.assertTrue( - '$clusterTime' in event.command, - "%s sent no $clusterTime with %s" % ( - f.__name__, event.command_name)) + "$clusterTime" in event.command, + f"{f.__name__} sent no $clusterTime with {event.command_name}", + ) if i > 0: - succeeded = listener.results['succeeded'][i - 1] + succeeded = listener.succeeded_events[i - 1] self.assertTrue( - '$clusterTime' in succeeded.reply, - "%s received no $clusterTime with %s" % ( - f.__name__, succeeded.command_name)) + "$clusterTime" in succeeded.reply, + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", + ) self.assertTrue( - event.command['$clusterTime']['clusterTime'] >= - succeeded.reply['$clusterTime']['clusterTime'], - "%s sent wrong $clusterTime with %s" % ( - f.__name__, event.command_name)) - - -class TestSpec(SpecRunner): - # Location of JSON test specifications. 
- TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions') - - def last_two_command_events(self): - """Return the last two command started events.""" - started_events = self.listener.results['started'][-2:] - self.assertEqual(2, len(started_events)) - return started_events - - def assert_same_lsid_on_last_two_commands(self): - """Run the assertSameLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertEqual(event1.command['lsid'], event2.command['lsid']) - - def assert_different_lsid_on_last_two_commands(self): - """Run the assertDifferentLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertNotEqual(event1.command['lsid'], event2.command['lsid']) - - def assert_session_dirty(self, session): - """Run the assertSessionDirty test operation. - - Assert that the given session is dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertTrue(session._server_session.dirty) - - def assert_session_not_dirty(self, session): - """Run the assertSessionNotDirty test operation. - - Assert that the given session is not dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertFalse(session._server_session.dirty) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario + event.command["$clusterTime"]["clusterTime"] + >= succeeded.reply["$clusterTime"]["clusterTime"], + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", + ) -test_creator = TestCreator(create_test, TestSpec, TestSpec.TEST_PATH) -test_creator.create_tests() +if __name__ == "__main__": + unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py new file mode 100644 index 0000000000..c51b4642e7 --- /dev/null +++ b/test/test_sessions_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_son.py b/test/test_son.py index 921f85d45c..579d765d8e 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -13,6 +13,7 @@ # limitations under the License. 
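Before the SON tests below, a quick sketch of the class's core guarantee (documented behavior, nothing new in this diff): SON is BSON's insertion-ordered mapping.

from bson.son import SON

s = SON()
s["b"] = 2
s["a"] = 1
assert list(s) == ["b", "a"]  # iteration follows insertion order
assert s == {"a": 1, "b": 2}  # equality against plain dicts ignores order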
"""Tests for the son module.""" +from __future__ import annotations import copy import pickle @@ -21,9 +22,10 @@ sys.path[0:0] = [""] -from bson.py3compat import b +from collections import OrderedDict +from test import unittest + from bson.son import SON -from test import SkipTest, unittest class TestSON(unittest.TestCase): @@ -32,9 +34,9 @@ def test_ordered_dict(self): a1["hello"] = "world" a1["mike"] = "awesome" a1["hello_"] = "mike" - self.assertEqual(list(a1.items()), [("hello", "world"), - ("mike", "awesome"), - ("hello_", "mike")]) + self.assertEqual( + list(a1.items()), [("hello", "world"), ("mike", "awesome"), ("hello_", "mike")] + ) b2 = SON({"hello": "world"}) self.assertEqual(b2["hello"], "world") @@ -42,38 +44,28 @@ def test_ordered_dict(self): def test_equality(self): a1 = SON({"hello": "world"}) - b2 = SON((('hello', 'world'), ('mike', 'awesome'), ('hello_', 'mike'))) + b2 = SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike"))) self.assertEqual(a1, SON({"hello": "world"})) - self.assertEqual(b2, SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertEqual(b2, dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertEqual(b2, {"hello_": "mike", "mike": "awesome", "hello": "world"}) self.assertNotEqual(a1, b2) - self.assertNotEqual(b2, SON((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) - self.assertFalse(b2 != SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertFalse(b2 != dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertFalse(b2 != {"hello_": "mike", "mike": "awesome", "hello": "world"}) # Embedded SON. - d4 = SON([('blah', {'foo': SON()})]) - self.assertEqual(d4, {'blah': {'foo': {}}}) - self.assertEqual(d4, {'blah': {'foo': SON()}}) - self.assertNotEqual(d4, {'blah': {'foo': []}}) + d4 = SON([("blah", {"foo": SON()})]) + self.assertEqual(d4, {"blah": {"foo": {}}}) + self.assertEqual(d4, {"blah": {"foo": SON()}}) + self.assertNotEqual(d4, {"blah": {"foo": []}}) # Original data unaffected. - self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_to_dict(self): a1 = SON() @@ -90,37 +82,34 @@ def test_to_dict(self): self.assertEqual(dict, d4.to_dict()["blah"]["foo"].__class__) # Original data unaffected. 
- self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_pickle(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) for protocol in range(pickle.HIGHEST_PROTOCOL + 1): - pickled = pickle.loads(pickle.dumps(complex_son, - protocol=protocol)) - self.assertEqual(pickled['son'], pickled['list'][0]) - self.assertEqual(pickled['son'], pickled['list'][1]) + pickled = pickle.loads(pickle.dumps(complex_son, protocol=protocol)) + self.assertEqual(pickled["son"], pickled["list"][0]) + self.assertEqual(pickled["son"], pickled["list"][1]) def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo # version 2.1.1 - pickled_with_2_1_1 = b( - "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" - "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." + pickled_with_2_1_1 = ( + b"ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" + b"c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" + b"S'_SON__keys'\np7\n(lp8\nsb." ) son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) def test_copying(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) regex_son = SON([("x", re.compile("^hello.*"))]) - reflexive_son = SON([('son', simple_son)]) + reflexive_son = SON([("son", simple_son)]) reflexive_son["reflexive"] = reflexive_son simple_son1 = copy.copy(simple_son) @@ -150,18 +139,14 @@ def test_copying(self): self.assertEqual(id(reflexive_son1), id(reflexive_son1["reflexive"])) def test_iteration(self): - """ - Test __iter__ - """ + """Test __iter__""" # test success case test_son = SON([(1, 100), (2, 200), (3, 300)]) for ele in test_son: self.assertEqual(ele * 100, test_son[ele]) def test_contains_has(self): - """ - has_key and __contains__ - """ + """has_key and __contains__""" test_son = SON([(1, 100), (2, 200), (3, 300)]) self.assertIn(1, test_son) self.assertTrue(2 in test_son, "in failed") @@ -170,9 +155,7 @@ def test_contains_has(self): self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") def test_clears(self): - """ - Test clear() - """ + """Test clear()""" test_son = SON([(1, 100), (2, 200), (3, 300)]) test_son.clear() self.assertNotIn(1, test_son) @@ -181,9 +164,7 @@ def test_clears(self): self.assertEqual({}, test_son.to_dict()) def test_len(self): - """ - Test len - """ + """Test len""" test_son = SON() self.assertEqual(0, len(test_son)) test_son = SON([(1, 100), (2, 200), (3, 300)]) @@ -191,5 +172,24 @@ def test_len(self): test_son.popitem() self.assertEqual(2, len(test_son)) + def test_keys(self): + # Test to make sure that set operations do not throw an error + d = SON().keys() + for i in [OrderedDict, dict]: + try: + d - i().keys() + except TypeError: + self.fail( + "SON().keys() is not returning an object compatible " + "with %s objects" % (str(i)) + ) + # Test to verify correctness + d = SON({"k": "v"}).keys() + for i in [OrderedDict, dict]: + self.assertEqual(d | i({"k1": 0}).keys(), {"k", "k1"}) + for i in [OrderedDict, dict]: + self.assertEqual(d - i({"k": 0}).keys(), set()) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_son_manipulator.py b/test/test_son_manipulator.py deleted file mode 100644 index 
b4b9544f5a..0000000000 --- a/test/test_son_manipulator.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for SONManipulators. -""" - -import sys -import warnings - -sys.path[0:0] = [""] - -from bson.son import SON -from pymongo import MongoClient -from pymongo.son_manipulator import (NamespaceInjector, - ObjectIdInjector, - ObjectIdShuffler, - SONManipulator) -from test import client_context, qcheck, unittest - - -class TestSONManipulator(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - client = MongoClient( - client_context.host, client_context.port, connect=False) - cls.db = client.pymongo_test - - @classmethod - def tearDownClass(cls): - cls.warn_context.__exit__() - cls.warn_context = None - - def test_basic(self): - manip = SONManipulator() - collection = self.db.test - - def incoming_is_identity(son): - return son == manip.transform_incoming(son, collection) - qcheck.check_unittest(self, incoming_is_identity, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_injection(self): - manip = ObjectIdInjector() - collection = self.db.test - - def incoming_adds_id(son): - son = manip.transform_incoming(son, collection) - assert "_id" in son - return True - qcheck.check_unittest(self, incoming_adds_id, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_shuffling(self): - manip = ObjectIdShuffler() - collection = self.db.test - - def incoming_moves_id(son_in): - son = manip.transform_incoming(son_in, collection) - if not "_id" in son: - return True - for (k, v) in son.items(): - self.assertEqual(k, "_id") - break - # Key order matters in SON equality test, - # matching collections.OrderedDict - if isinstance(son_in, SON): - return son_in.to_dict() == son.to_dict() - return son_in == son - - self.assertTrue(incoming_moves_id({})) - self.assertTrue(incoming_moves_id({"_id": 12})) - self.assertTrue(incoming_moves_id({"hello": "world", "_id": 12})) - self.assertTrue(incoming_moves_id(SON([("hello", "world"), - ("_id", 12)]))) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_ns_injection(self): - manip = NamespaceInjector() - collection = self.db.test - - def incoming_adds_ns(son): - son = manip.transform_incoming(son, collection) - assert "_ns" in son - return son["_ns"] == collection.name - qcheck.check_unittest(self, incoming_adds_ns, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == 
manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 1908103476..d7e9106626 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -13,33 +13,37 @@ # limitations under the License. """Run the SRV support tests.""" +from __future__ import annotations import sys - from time import sleep +from typing import Any sys.path[0:0] = [""] -import pymongo +from test import client_knobs, unittest +from test.utils import FunctionCallRecorder, wait_until +import pymongo from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.mongo_client import MongoClient -from test import client_knobs, unittest -from test.utils import wait_until, FunctionCallRecorder - +from pymongo.srv_resolver import _HAVE_DNSPYTHON WAIT_TIME = 0.1 -class SrvPollingKnobs(object): - def __init__(self, ttl_time=None, min_srv_rescan_interval=None, - dns_resolver_nodelist_response=None, - count_resolver_calls=False): +class SrvPollingKnobs: + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): self.ttl_time = ttl_time self.min_srv_rescan_interval = min_srv_rescan_interval - self.dns_resolver_nodelist_response = dns_resolver_nodelist_response + self.nodelist_callback = nodelist_callback self.count_resolver_calls = count_resolver_calls self.old_min_srv_rescan_interval = None @@ -47,34 +51,36 @@ def __init__(self, ttl_time=None, min_srv_rescan_interval=None, def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = \ - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval def mock_get_hosts_and_min_ttl(resolver, *args): + assert self.old_dns_resolver_response is not None nodes, ttl = self.old_dns_resolver_response(resolver) - if self.dns_resolver_nodelist_response is not None: - nodes = self.dns_resolver_nodelist_response() + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() if self.ttl_time is not None: ttl = self.ttl_time return nodes, ttl + patch_func: Any if self.count_resolver_calls: patch_func = FunctionCallRecorder(mock_get_hosts_and_min_ttl) else: patch_func = mock_get_hosts_and_min_ttl - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore def __enter__(self): self.enable() def disable(self): - common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = \ - self.old_dns_resolver_response + common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response # type: ignore + ) def __exit__(self, exc_type, exc_val, exc_tb): self.disable() @@ -84,18 +90,20 @@ class TestSrvPolling(unittest.TestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), - ("localhost.test.build.10gen.cc", 27018)] + ("localhost.test.build.10gen.cc", 27018), + ] CONNECTION_STRING = 
"mongodb+srv://test1.test.build.10gen.cc" def setUp(self): if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython " - "module") + raise unittest.SkipTest("SRV polling tests require the dnspython module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( - heartbeat_frequency=WAIT_TIME, min_heartbeat_interval=WAIT_TIME, - events_queue_frequency=WAIT_TIME) + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) self.client_knobs.enable() def tearDown(self): @@ -108,12 +116,14 @@ def assert_nodelist_change(self, expected_nodelist, client): """Check if the client._topology eventually sees all nodes in the expected_nodelist. """ + def predicate(): nodelist = self.get_nodelist(client) if set(expected_nodelist) == set(nodelist): return True return False - wait_until(predicate, "see expected nodelist", timeout=100*WAIT_TIME) + + wait_until(predicate, "see expected nodelist", timeout=100 * WAIT_TIME) def assert_nodelist_nochange(self, expected_nodelist, client): """Check if the client._topology ever deviates from seeing all nodes @@ -121,20 +131,30 @@ def assert_nodelist_nochange(self, expected_nodelist, client): (WAIT_TIME * 10) seconds. Also check that the resolver is called at least once. """ - sleep(WAIT_TIME*10) + + def predicate(): + if set(expected_nodelist) == set(self.get_nodelist(client)): + return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return False + + wait_until(predicate, "Node list equals expected nodelist", timeout=100 * WAIT_TIME) + nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, - 1, "resolver was never called") + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore + 1, + "resolver was never called", + ) return True def run_scenario(self, dns_response, expect_change): if callable(dns_response): dns_resolver_response = dns_response else: + def dns_resolver_response(): return dns_response @@ -148,34 +168,29 @@ def dns_resolver_response(): expected_response = self.BASE_SRV_RESPONSE # Patch timeouts to ensure short test running times. - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING) self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. 
with SrvPollingKnobs( - dns_resolver_nodelist_response=dns_resolver_response, - count_resolver_calls=count_resolver_calls): + nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls + ): assertion_method(expected_response, client) def test_addition(self): response = self.BASE_SRV_RESPONSE[:] - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_removal(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) + response.remove(("localhost.test.build.10gen.cc", 27018)) self.run_scenario(response, True) def test_replace_one(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_replace_both_with_one(self): @@ -183,53 +198,139 @@ def test_replace_both_with_one(self): self.run_scenario(response, True) def test_replace_both_with_two(self): - response = [("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] self.run_scenario(response, True) def test_dns_failures(self): from dns import exception + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + def response_callback(*args): raise exc("DNS Failure!") + self.run_scenario(response_callback, False) def test_dns_record_lookup_empty(self): - response = [] + response: list = [] self.run_scenario(response, False) def _test_recover_from_initial(self, initial_callback): # Construct a valid final response callback distinct from base. response_final = self.BASE_SRV_RESPONSE[:] response_final.pop() + def final_callback(): return response_final with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - dns_resolver_nodelist_response=initial_callback, - count_resolver_calls=True): + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): # Client uses unpatched method to get initial nodelist client = MongoClient(self.CONNECTION_STRING) # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - dns_resolver_nodelist_response=final_callback): + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): # Nodelist should reflect new valid DNS resolver response. 
self.assert_nodelist_change(response_final, client) def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] + self._test_recover_from_initial(empty_seedlist) def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError + self._test_recover_from_initial(erroring_seedlist) + def test_10_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + + def test_11_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + + def test_12_new_dns_randomly_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) + self.assertEqual(len(final_topology), 2) + + def test_does_not_flipflop(self): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) + self.addCleanup(client.close) + old = set(client.topology_description.server_descriptions()) + sleep(4 * WAIT_TIME) + new = set(client.topology_description.server_descriptions()) + self.assertSetEqual(old, new) + + def test_srv_service_name(self): + # Construct a valid final response callback distinct from base. + response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" + ) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index c5531f9eb6..bde385138c 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -13,6 +13,7 @@ # limitations under the License. 
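The TLS tests that follow use PyMongo's current option spellings throughout. A quick reference for the renames applied in this diff, with placeholder file paths:

from pymongo import MongoClient

# ssl_certfile / ssl_keyfile   -> tlsCertificateKeyFile
# ssl_pem_passphrase           -> tlsCertificateKeyFilePassword
# ssl_ca_certs                 -> tlsCAFile
# ssl_crlfile                  -> tlsCRLFile
# ssl_cert_reqs=ssl.CERT_NONE  -> tlsAllowInvalidCertificates=True
# ssl_match_hostname=False     -> tlsAllowInvalidHostnames=True
client = MongoClient(
    "localhost",
    tls=True,
    tlsCertificateKeyFile="client.pem",  # placeholder path
    tlsCAFile="ca.pem",  # placeholder path
)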
"""Tests for SSL support.""" +from __future__ import annotations import os import socket @@ -20,40 +21,49 @@ sys.path[0:0] = [""] -try: - from urllib.parse import quote_plus -except ImportError: - # Python 2 - from urllib import quote_plus +from test import HAVE_IPADDRESS, IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + EventListener, + cat_files, + connected, + ignore_deprecations, + remove_all_users, +) +from urllib.parse import quote_plus from pymongo import MongoClient, ssl_support -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - OperationFailure) -from pymongo.ssl_support import HAVE_SSL, get_ssl_context, validate_cert_reqs +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern -from test import (IntegrationTest, - client_context, - db_pwd, - db_user, - SkipTest, - unittest, - HAVE_IPADDRESS) -from test.utils import remove_all_users, connected + +_HAVE_PYOPENSSL = False +try: + # All of these must be available to use PyOpenSSL + import OpenSSL + import requests + import service_identity + + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address + + from pymongo.ocsp_support import _load_trusted_ca_certs + + _HAVE_PYOPENSSL = True +except ImportError: + _load_trusted_ca_certs = None # type: ignore + if HAVE_SSL: import ssl -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.path.join(CERT_PATH, 'client.pem') -CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, 'password_protected.pem') -CA_PEM = os.path.join(CERT_PATH, 'ca.pem') -CRL_PEM = os.path.join(CERT_PATH, 'crl.pem') -MONGODB_X509_USERNAME = ( - "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client") - -_PY37PLUS = sys.version_info[:2] >= (3, 7) +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" # To fully test this start a mongod instance (built with SSL support) like so: # mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ @@ -67,97 +77,72 @@ class TestClientSSL(unittest.TestCase): - - @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what " - "happens without it.") + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): # Explicit - self.assertRaises(ConfigurationError, - MongoClient, ssl=True) + self.assertRaises(ConfigurationError, MongoClient, ssl=True) # Implied - self.assertRaises(ConfigurationError, - MongoClient, ssl_certfile=CLIENT_PEM) + self.assertRaises(ConfigurationError, MongoClient, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations - self.assertRaises(ValueError, MongoClient, ssl='foo') - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) + self.assertRaises(ValueError, MongoClient, ssl="foo") + self.assertRaises( + 
ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) self.assertRaises(TypeError, MongoClient, ssl=0) self.assertRaises(TypeError, MongoClient, ssl=5.5) self.assertRaises(TypeError, MongoClient, ssl=[]) - self.assertRaises(IOError, MongoClient, ssl_certfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_certfile=True) - self.assertRaises(TypeError, MongoClient, ssl_certfile=[]) - self.assertRaises(IOError, MongoClient, ssl_keyfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_keyfile=True) - self.assertRaises(TypeError, MongoClient, ssl_keyfile=[]) + self.assertRaises(IOError, MongoClient, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) # Test invalid combinations - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM, - ssl_certfile=CLIENT_PEM) - self.assertRaises( - ValueError, validate_cert_reqs, 'ssl_cert_reqs', 3) + ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidCertificates=False + ) self.assertRaises( - ValueError, validate_cert_reqs, 'ssl_cert_reqs', -1) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', None), None) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_NONE), - ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_OPTIONAL), - ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_REQUIRED), - ssl.CERT_REQUIRED) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 0), ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 1), ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 2), ssl.CERT_REQUIRED) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_NONE'), ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_OPTIONAL'), - ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_REQUIRED'), - ssl.CERT_REQUIRED) + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsDisableOCSPEndpointCheck=False + ) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_pyopenssl_when_available(self): + self.assertTrue(_ssl.IS_PYOPENSSL) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) class TestSSL(IntegrationTest): + saved_port: int def assertClientWorks(self, client): coll = client.pymongo_test.ssl_test.with_options( - write_concern=WriteConcern(w=client_context.w)) + write_concern=WriteConcern(w=client_context.w) + ) coll.drop() - coll.insert_one({'ssl': True}) - self.assertTrue(coll.find_one()['ssl']) + coll.insert_one({"ssl": True}) + self.assertTrue(coll.find_one()["ssl"]) coll.drop() @classmethod 
@unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") def setUpClass(cls): - super(TestSSL, cls).setUpClass() + super().setUpClass() # MongoClient should connect to the primary by default. cls.saved_port = MongoClient.PORT MongoClient.PORT = client_context.port @@ -165,48 +150,57 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): MongoClient.PORT = cls.saved_port - super(TestSSL, cls).tearDownClass() + super().tearDownClass() - @client_context.require_ssl + @client_context.require_tls def test_simple_ssl(self): # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation self.assertClientWorks(self.client) - @client_context.require_ssl_certfile - def test_ssl_pem_passphrase(self): + @client_context.require_tlsCertificateKeyFile + @ignore_deprecations + def test_tlsCertificateKeyFilePassword(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - vi = sys.version_info - if vi[0] == 2 and vi < (2, 7, 9) or vi[0] == 3 and vi < (3, 3): + if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, - ssl_certfile=CLIENT_ENCRYPTED_PEM, - ssl_pem_passphrase="qwerty", - ssl_ca_certs=CA_PEM, - serverSelectionTimeoutMS=100) + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) else: - connected(MongoClient('localhost', - ssl=True, - ssl_certfile=CLIENT_ENCRYPTED_PEM, - ssl_pem_passphrase="qwerty", - ssl_ca_certs=CA_PEM, - serverSelectionTimeoutMS=5000, - **self.credentials)) - - uri_fmt = ("mongodb://localhost/?ssl=true" - "&ssl_certfile=%s&ssl_pem_passphrase=qwerty" - "&ssl_ca_certs=%s&serverSelectionTimeoutMS=5000") - connected(MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), - **self.credentials)) - - @client_context.require_ssl_certfile + connected( + MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + connected( + MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_implicitly_set(self): # Expects the server to be running with server.pem and ca.pem # @@ -214,207 +208,212 @@ def test_cert_ssl_implicitly_set(self): # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - # test that setting ssl_certfile causes ssl to be set to True - client = MongoClient(client_context.host, client_context.port, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) - response = client.admin.command('ismaster') - if 'setName' in response: - client = MongoClient(client_context.pair, - replicaSet=response['setName'], - w=len(response['hosts']), - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + # test that setting tlsCertificateKeyFile causes ssl to be set to True + client = MongoClient( + client_context.host, + client_context.port, + tlsAllowInvalidCertificates=True, + 
tlsCertificateKeyFile=CLIENT_PEM, + ) + response = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + client = MongoClient( + client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) self.assertClientWorks(client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_validation(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - client = MongoClient('localhost', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) - response = client.admin.command('ismaster') - if 'setName' in response: - if response['primary'].split(":")[0] != 'localhost': - raise SkipTest("No hosts in the replicaset for 'localhost'. " - "Cannot validate hostname in the certificate") - - client = MongoClient('localhost', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) + client = MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + response = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. " + "Cannot validate hostname in the certificate" + ) + + client = MongoClient( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) if HAVE_IPADDRESS: - client = MongoClient('127.0.0.1', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) + client = MongoClient( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_uri_support(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - uri_fmt = ("mongodb://localhost/?ssl=true&ssl_certfile=%s&ssl_cert_reqs" - "=%s&ssl_ca_certs=%s&ssl_match_hostname=true") - client = MongoClient(uri_fmt % (CLIENT_PEM, 'CERT_REQUIRED', CA_PEM)) + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = MongoClient(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) - @client_context.require_ssl_certfile - @client_context.require_no_auth - def test_cert_ssl_validation_optional(self): - # Expects the server to be running with server.pem and ca.pem - # - # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem - # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - # - client = MongoClient('localhost', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - - response = 
client.admin.command('ismaster') - if 'setName' in response: - if response['primary'].split(":")[0] != 'localhost': - raise SkipTest("No hosts in the replicaset for 'localhost'. " - "Cannot validate hostname in the certificate") - - client = MongoClient('localhost', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - - self.assertClientWorks(client) - - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @ignore_deprecations def test_cert_ssl_validation_hostname_matching(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + ctx = get_ssl_context(None, None, None, None, True, True, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, True, False, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, True, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, False, False) + self.assertTrue(ctx.check_hostname) - # Python > 2.7.9. If SSLContext doesn't have load_default_certs - # it also doesn't have check_hostname. - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False) - if hasattr(ctx, 'load_default_certs'): - self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, True) - self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, False) - self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, True) - if _PY37PLUS: - self.assertTrue(ctx.check_hostname) - else: - self.assertFalse(ctx.check_hostname) - - response = self.client.admin.command('ismaster') + response = self.client.admin.command(HelloCompat.LEGACY_CMD) with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) - - connected(MongoClient('server', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - ssl_match_hostname=False, - serverSelectionTimeoutMS=500, - **self.credentials)) - - if 'setName' in response: + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + if "setName" in response: with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) - - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - ssl_match_hostname=False, - 
serverSelectionTimeoutMS=500, - **self.credentials)) - - @client_context.require_ssl_certfile - def test_ssl_crlfile_support(self): - if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'): + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + @client_context.require_tlsCertificateKeyFile + @ignore_deprecations + def test_tlsCRLFile_support(self): + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, - ssl_ca_certs=CA_PEM, - ssl_crlfile=CRL_PEM, - serverSelectionTimeoutMS=100) + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + ) else: - connected(MongoClient('localhost', - ssl=True, - ssl_ca_certs=CA_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient('localhost', - ssl=True, - ssl_ca_certs=CA_PEM, - ssl_crlfile=CRL_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) - - uri_fmt = ("mongodb://localhost/?ssl=true&" - "ssl_ca_certs=%s&serverSelectionTimeoutMS=100") - connected(MongoClient(uri_fmt % (CA_PEM,), - **self.credentials)) - - uri_fmt = ("mongodb://localhost/?ssl=true&ssl_crlfile=%s" - "&ssl_ca_certs=%s&serverSelectionTimeoutMS=100") + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient(uri_fmt % (CRL_PEM, CA_PEM), - **self.credentials)) + connected( + MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @ignore_deprecations def test_validation_with_system_ca_certs(self): # Expects the server to be running with server.pem and ca.pem. # @@ -422,51 +421,42 @@ def test_validation_with_system_ca_certs(self): # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # --sslWeakCertificateValidation # - if sys.platform == "win32": - raise SkipTest("Can't test system ca certs on Windows.") - - if sys.version_info < (2, 7, 9): - raise SkipTest("Can't load system CA certificates.") - - # Tell OpenSSL where CA certificates live. 
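On the deleted branch below: OpenSSL-backed contexts pick up the SSL_CERT_FILE environment variable when default certificates are loaded, which is what the old test relied on. A standalone sketch of that mechanism on POSIX platforms, with a placeholder path:

import os
import ssl

os.environ["SSL_CERT_FILE"] = "/path/to/ca.pem"  # placeholder
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()  # honors SSL_CERT_FILE via OpenSSL's default paths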
- os.environ['SSL_CERT_FILE'] = CA_PEM - try: - with self.assertRaises(ConnectionFailure): - # Server cert is verified but hostname matching fails - connected(MongoClient('server', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) - - # Server cert is verified. Disable hostname matching. - connected(MongoClient('server', - ssl=True, - ssl_match_hostname=False, - serverSelectionTimeoutMS=100, - **self.credentials)) - - # Server cert and hostname are verified. - connected(MongoClient('localhost', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) - - # Server cert and hostname are verified. + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails connected( - MongoClient( - 'mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100', - **self.credentials)) - finally: - os.environ.pop('SSL_CERT_FILE') + MongoClient("server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] + ) + + # Server cert is verified. Disable hostname matching. + connected( + MongoClient( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + # Server cert and hostname are verified. + connected( + MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] + ) + + # Server cert and hostname are verified. + connected( + MongoClient( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", **self.credentials # type: ignore[arg-type] + ) + ) def test_system_certs_config_error(self): - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False) - if ((sys.platform != "win32" - and hasattr(ctx, "set_default_verify_paths")) - or hasattr(ctx, "load_default_certs")): - raise SkipTest( - "Can't test when system CA certificates are loadable.") + ctx = get_ssl_context(None, None, None, None, True, True, False) + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") have_certifi = ssl_support.HAVE_CERTIFI have_wincertstore = ssl_support.HAVE_WINCERTSTORE @@ -493,12 +483,11 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. 
ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True) + ctx = get_ssl_context(None, None, None, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -515,116 +504,148 @@ def test_wincertstore(self): if not ssl_support.HAVE_WINCERTSTORE: raise SkipTest("Need wincertstore to test wincertstore.") - ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True) + ctx = get_ssl_context(None, None, None, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) @client_context.require_auth - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile + @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port - ssl_client = MongoClient( - client_context.pair, - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) - self.addCleanup(remove_all_users, ssl_client['$external']) - - ssl_client.admin.authenticate(db_user, db_pwd) + self.addCleanup(remove_all_users, client_context.client["$external"]) # Give x509 user all necessary privileges. - client_context.create_user('$external', MONGODB_X509_USERNAME, roles=[ - {'role': 'readWriteAnyDatabase', 'db': 'admin'}, - {'role': 'userAdminAnyDatabase', 'db': 'admin'}]) + client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) noauth = MongoClient( client_context.pair, ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + self.addCleanup(noauth.close) - self.assertRaises(OperationFailure, noauth.pymongo_test.test.count) + with self.assertRaises(OperationFailure): + noauth.pymongo_test.test.find_one() + listener = EventListener() auth = MongoClient( client_context.pair, - authMechanism='MONGODB-X509', + authMechanism="MONGODB-X509", ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + event_listeners=[listener], + ) + self.addCleanup(auth.close) - if client_context.version.at_least(3, 3, 12): - # No error - auth.pymongo_test.test.find_one() + # No error + auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. 
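For reference, MONGODB-X509 derives the username from the client certificate subject when none is supplied, which is what the assertions around here verify. A minimal connection sketch; the host and paths are placeholders:

from pymongo import MongoClient

client = MongoClient(
    "db.example.com",  # placeholder host
    authMechanism="MONGODB-X509",
    tls=True,
    tlsCertificateKeyFile="client.pem",  # placeholder path
    tlsCAFile="ca.pem",  # placeholder path
)
# The username defaults to the certificate subject, e.g.
# "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client"
client.admin.command("ping")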
+ self.assertEqual(names, ["find"]) else: - # Should require a username - with self.assertRaises(ConfigurationError): - auth.pymongo_test.test.find_one() - - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) - client = MongoClient(uri, - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() - uri = 'mongodb://%s:%d/?authMechanism=MONGODB-X509' % (host, port) - client = MongoClient(uri, - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) - if client_context.version.at_least(3, 3, 12): - # No error - client.pymongo_test.test.find_one() - else: - # Should require a username - with self.assertRaises(ConfigurationError): - client.pymongo_test.test.find_one() - + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + self.addCleanup(client.close) + # No error + client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus("not the username"), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) bad_client = MongoClient( - uri, ssl=True, ssl_cert_reqs="CERT_NONE", ssl_certfile=CLIENT_PEM) + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() bad_client = MongoClient( - client_context.pair, - username="not the username", - authMechanism='MONGODB-X509', - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) try: - connected(MongoClient(uri, - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CA_PEM, - serverSelectionTimeoutMS=100)) + connected( + MongoClient( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + ) except (ConnectionFailure, ConfigurationError): pass else: self.fail("Invalid certificate accepted.") + @client_context.require_tlsCertificateKeyFile + @ignore_deprecations + def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. 
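cat_files, used on the next line, is a test helper; a PEM "bundle" is just several certificates appended into a single file, so a plausible implementation (an assumption, not the helper's actual code) looks like:

    import shutil

    def cat_files(dest, *sources):
        # Append each source file's bytes to dest, in order.
        with open(dest, "wb") as out:
            for path in sources:
                with open(path, "rb") as f:
                    shutil.copyfileobj(f, out)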
+ cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + with MongoClient( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(client.admin.command("ping")) + if __name__ == "__main__": unittest.main() diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py new file mode 100644 index 0000000000..44e673822a --- /dev/null +++ b/test/test_streaming_protocol.py @@ -0,0 +1,231 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the streaming protocol.""" +from __future__ import annotations + +import sys +import time + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils import ( + HeartbeatEventListener, + ServerEventListener, + rs_or_single_client, + single_client, + wait_until, +) + +from pymongo import monitoring +from pymongo.hello import HelloCompat + + +class TestStreamingProtocol(IntegrationTest): + @client_context.require_failCommand_appName + def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) + self.addCleanup(client.close) + # Force a connection. + client.admin.command("ping") + address = client.address + listener.reset() + + fail_hello = { + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", + }, + } + with self.fail_point(fail_hello): + + def _marked_unknown(event): + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) + + def _discovered_node(event): + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are published asynchronously. + wait_until(marked_unknown, "mark node unknown") + wait_until(rediscovered, "rediscover node") + + # Server should be selectable. + client.admin.command("ping") + + @client_context.require_failCommand_appName + def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. We need to delay the initial hello + # to ensure that RTT is never zero. + name = "streamingRttTest" + delay_hello: dict = { + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, + # This can be uncommented after SERVER-49220 is fixed.
+ # 'appName': name, + }, + } + with self.fail_point(delay_hello): + client = rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) + self.addCleanup(client.close) + # Force a connection. + client.admin.command("ping") + address = client.address + + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name + with self.fail_point(delay_hello): + + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? + topology = client._topology + sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None + return sd.round_trip_time > 0.250 + + wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") + + # Server should be selectable. + client.admin.command("ping") + + def changed_event(event): + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @client_context.require_version_min(4, 9, -1) + @client_context.require_failCommand_appName + def test_monitor_waits_after_server_check_error(self): + # This test implements: + # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + fail_hello = { + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", + }, + } + with self.fail_point(fail_hello): + start = time.time() + client = single_client( + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) + self.addCleanup(client.close) + # Force a connection. + client.admin.command("ping") + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable hello + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 3.5) + + @client_context.require_failCommand_appName + def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = single_client( + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) + self.addCleanup(client.close) + # Force a connection. + client.admin.command("ping") + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", + }, + } + with self.fail_point(fail_heartbeat): + wait_until(lambda: hb_listener.matching(hb_failed), "published failed event") + # Reconnect. 
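Background for the awaited assertions below: with streaming monitoring (MongoDB 4.4+) every heartbeat after the initial handshake is an awaitable hello, so its event reports awaited=True, while the first heartbeat on a fresh connection is a plain round trip with awaited=False. A small sketch of partitioning heartbeat events by that flag, using the listener API from this file:

    from pymongo import monitoring

    def split_by_awaited(hb_listener):
        succeeded = hb_listener.matching(
            lambda e: isinstance(e, monitoring.ServerHeartbeatSucceededEvent)
        )
        # Streaming checks report awaited=True; polling checks report False.
        return (
            [e for e in succeeded if e.awaited],
            [e for e in succeeded if not e.awaited],
        )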
+ client.admin.command("ping") + + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_failed_events[0].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. + events = [type(e) for e in hb_listener.events[:4]] + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_threads.py b/test/test_threads.py index 21d24a6b4d..b3dadbb1a3 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -13,17 +13,11 @@ # limitations under the License. """Test that pymongo is thread safe.""" +from __future__ import annotations import threading - -from test import (client_context, - db_user, - db_pwd, - IntegrationTest, - unittest) -from test.utils import rs_or_single_client_noauth, rs_or_single_client +from test import IntegrationTest, client_context, unittest from test.utils import joinall -from pymongo.errors import OperationFailure @client_context.require_connection @@ -32,28 +26,26 @@ def setUpModule(): class AutoAuthenticateThreads(threading.Thread): - def __init__(self, collection, num): threading.Thread.__init__(self) self.coll = collection self.num = num self.success = False - self.setDaemon(True) + self.daemon = True def run(self): for i in range(self.num): - self.coll.insert_one({'num': i}) - self.coll.find_one({'num': i}) + self.coll.insert_one({"num": i}) + self.coll.find_one({"num": i}) self.success = True class SaveAndFind(threading.Thread): - def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection - self.setDaemon(True) + self.daemon = True self.passed = False def run(self): @@ -66,13 +58,12 @@ def run(self): class Insert(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -90,21 +81,19 @@ def run(self): class Update(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): error = True try: - self.collection.update_one({"test": "unique"}, - {"$set": {"test": "update"}}) + self.collection.update_one({"test": "unique"}, {"$set": {"test": "update"}}) error = False except: if not self.expect_exception: @@ -114,21 +103,6 @@ def run(self): assert error -class Disconnect(threading.Thread): - - def __init__(self, client, n): - threading.Thread.__init__(self) - self.client = client - self.n = n - self.passed = False - - def run(self): - for _ in range(self.n): - self.client.close() - - self.passed = True - - class TestThreads(IntegrationTest): def setUp(self): self.db = self.client.pymongo_test @@ -138,7 +112,7 @@ def test_threading(self): self.db.test.insert_many([{"x": i} for i in range(1000)]) threads = [] - for i in range(10): + for _i in range(10): t = SaveAndFind(self.db.test) t.start() threads.append(t) @@ -183,54 +157,6 @@ def 
test_safe_update(self): error.join() okay.join() - def test_client_disconnect(self): - db = rs_or_single_client(serverSelectionTimeoutMS=30000).pymongo_test - db.drop_collection("test") - db.test.insert_many([{"x": i} for i in range(1000)]) - - # Start 10 threads that execute a query, and 10 threads that call - # client.close() 10 times in a row. - threads = [SaveAndFind(db.test) for _ in range(10)] - threads.extend(Disconnect(db.client, 10) for _ in range(10)) - - for t in threads: - t.start() - - for t in threads: - t.join(300) - - for t in threads: - self.assertTrue(t.passed) - - -class TestThreadsAuth(IntegrationTest): - @classmethod - @client_context.require_auth - def setUpClass(cls): - super(TestThreadsAuth, cls).setUpClass() - - def test_auto_auth_login(self): - # Create the database upfront to workaround SERVER-39167. - self.client.auth_test.test.insert_one({}) - self.addCleanup(self.client.drop_database, "auth_test") - client = rs_or_single_client_noauth() - self.assertRaises(OperationFailure, client.auth_test.test.find_one) - - # Admin auth - client.admin.authenticate(db_user, db_pwd) - - nthreads = 10 - threads = [] - for _ in range(nthreads): - t = AutoAuthenticateThreads(client.auth_test.test, 10) - t.start() - threads.append(t) - - joinall(threads) - - for t in threads: - self.assertTrue(t.success) - if __name__ == "__main__": unittest.main() diff --git a/test/test_timestamp.py b/test/test_timestamp.py index bb3358121c..7495d2ec9f 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -13,16 +13,19 @@ # limitations under the License. """Tests for the Timestamp class.""" +from __future__ import annotations -import datetime -import sys import copy +import datetime import pickle +import sys + sys.path[0:0] = [""] +from test import unittest + from bson.timestamp import Timestamp from bson.tz_util import utc -from test import unittest class TestTimestamp(unittest.TestCase): @@ -78,5 +81,6 @@ def test_repr(self): t = Timestamp(0, 0) self.assertEqual(repr(t), "Timestamp(0, 0)") + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index 31c7b0ce1c..88c99d2a28 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -13,47 +13,29 @@ # limitations under the License. 
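The test_topology.py changes below rename every isMaster literal to go through HelloCompat. The shim they rely on is essentially the following (values as in pymongo/hello.py):

    class HelloCompat:
        CMD = "hello"  # modern handshake command (MongoDB 4.4+)
        LEGACY_CMD = "ismaster"  # pre-4.4 spelling, kept for compatibility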
"""Test the topology module.""" +from __future__ import annotations import sys sys.path[0:0] = [""] -import threading +from test import client_knobs, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, wait_until -from bson.py3compat import imap +from bson.objectid import ObjectId from pymongo import common -from pymongo.read_preferences import ReadPreference, Secondary -from pymongo.server_type import SERVER_TYPE -from pymongo.topology import Topology -from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure) -from pymongo.ismaster import IsMaster +from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import PoolOptions +from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE from pymongo.settings import TopologySettings -from test import client_knobs, unittest -from test.utils import MockPool, wait_until - - -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - self._server_description = server_description - self._topology = topology - self.opened = False - - def open(self): - self.opened = True - - def request_check(self): - pass - - def close(self): - self.opened = False +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE class SetNameDiscoverySettings(TopologySettings): @@ -61,28 +43,28 @@ def get_topology_type(self): return TOPOLOGY_TYPE.ReplicaSetNoPrimary -address = ('a', 27017) +address = ("a", 27017) def create_mock_topology( - seeds=None, - replica_set_name=None, - monitor_class=MockMonitor): - partitioned_seeds = list(imap(common.partition_node, seeds or ['a'])) + seeds=None, replica_set_name=None, monitor_class=DummyMonitor, direct_connection=False +): + partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, - pool_class=MockPool, - monitor_class=monitor_class) + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=monitor_class, + direct_connection=direct_connection, + ) t = Topology(topology_settings) t.open() return t -def got_ismaster(topology, server_address, ismaster_response): - server_description = ServerDescription( - server_address, IsMaster(ismaster_response), 0) +def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) @@ -108,18 +90,12 @@ class TopologyTest(unittest.TestCase): """Disables periodic monitoring, to make tests deterministic.""" def setUp(self): - super(TopologyTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=999999) self.client_knobs.enable() self.addCleanup(self.client_knobs.disable) -# Use assertRaisesRegex if available, otherwise use Python 2.7's -# deprecated assertRaisesRegexp, with a 'p'. 
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - TopologyTest.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - class TestTopologyConfiguration(TopologyTest): def test_timeout_configuration(self): pool_options = PoolOptions(connect_timeout=1, socket_timeout=2) @@ -128,7 +104,7 @@ def test_timeout_configuration(self): t.open() # Get the default server. - server = t.get_server_by_address(('localhost', 27017)) + server = t.get_server_by_address(("localhost", 27017)) # The pool for application operations obeys our settings. self.assertEqual(1, server._pool.opts.connect_timeout) @@ -140,62 +116,62 @@ def test_timeout_configuration(self): self.assertEqual(1, monitor._pool.opts.connect_timeout) self.assertEqual(1, monitor._pool.opts.socket_timeout) - # The monitor, not its pool, is responsible for calling ismaster. + # The monitor, not its pool, is responsible for calling hello. self.assertFalse(monitor._pool.handshake) class TestSingleServerTopology(TopologyTest): def test_direct_connection(self): - for server_type, ismaster_response in [ - (SERVER_TYPE.RSPrimary, { - 'ok': 1, - 'ismaster': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSSecondary, { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Mongos, { - 'ok': 1, - 'ismaster': True, - 'msg': 'isdbgrid', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSArbiter, { - 'ok': 1, - 'ismaster': False, - 'arbiterOnly': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Standalone, { - 'ok': 1, - 'ismaster': True, - 'maxWireVersion': 6}), - - # Slave. - (SERVER_TYPE.Standalone, { - 'ok': 1, - 'ismaster': False, - 'maxWireVersion': 6}), + for server_type, hello_response in [ + ( + SERVER_TYPE.RSPrimary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.RSSecondary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.Mongos, + {"ok": 1, HelloCompat.LEGACY_CMD: True, "msg": "isdbgrid", "maxWireVersion": 6}, + ), + ( + SERVER_TYPE.RSArbiter, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "arbiterOnly": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: True, "maxWireVersion": 6}), + # A "slave" in a master-slave deployment. + # This replication type was removed in MongoDB + # 4.0. + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: False, "maxWireVersion": 6}), ]: - t = create_mock_topology() + t = create_mock_topology(direct_connection=True) # Can't select a server while the only server is of type Unknown. - with self.assertRaisesRegex(ConnectionFailure, - 'No servers found yet'): - t.select_servers(any_server_selector, - server_selection_timeout=0) + with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): + t.select_servers(any_server_selector, server_selection_timeout=0) - got_ismaster(t, address, ismaster_response) + got_hello(t, address, hello_response) # Topology type never changes. self.assertEqual(TOPOLOGY_TYPE.Single, t.description.topology_type) @@ -207,12 +183,13 @@ def test_direct_connection(self): # Topology type single is always readable and writable regardless # of server type or state. 
- self.assertEqual(t.description.topology_type_name, 'Single') + self.assertEqual(t.description.topology_type_name, "Single") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) self.assertTrue(t.description.has_readable_server(Secondary())) - self.assertTrue(t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'does-not-exist'}]))) + self.assertTrue( + t.description.has_readable_server(Secondary(tag_sets=[{"tag": "does-not-exist"}])) + ) def test_reopen(self): t = create_mock_topology() @@ -224,7 +201,7 @@ def test_reopen(self): def test_unavailable_seed(self): t = create_mock_topology() disconnected(t, address) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) def test_round_trip_time(self): round_trip_time = 125 @@ -233,12 +210,12 @@ def test_round_trip_time(self): class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if available: - return (IsMaster({'ok': 1, 'maxWireVersion': 6}), - round_trip_time) + return (Hello({"ok": 1, "maxWireVersion": 6}), round_trip_time) else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) s = t.select_server(writable_server_selector) self.assertEqual(125, s.description.round_trip_time) @@ -254,14 +231,13 @@ def _check_with_socket(self, *args, **kwargs): def raises_err(): try: - t.select_server(writable_server_selector, - server_selection_timeout=0.1) + t.select_server(writable_server_selector, server_selection_timeout=0.1) except ConnectionFailure: return True else: return False - wait_until(raises_err, 'discover server is down') + wait_until(raises_err, "discover server is down") self.assertIsNone(s.description.round_trip_time) # Bring it back, RTT is now 20 milliseconds. @@ -271,8 +247,10 @@ def raises_err(): def new_average(): # We reset the average to the most recent measurement. 
description = s.description - return (description.round_trip_time is not None - and round(abs(20 - description.round_trip_time), 7) == 0) + return ( + description.round_trip_time is not None + and round(abs(20 - description.round_trip_time), 7) == 0 + ) tries = 0 while not new_average(): @@ -284,272 +262,289 @@ def new_average(): class TestMultiServerTopology(TopologyTest): def test_readable_writable(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertTrue( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': False, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertTrue( - t.description.topology_type_name, 'ReplicaSetNoPrimary') + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": False, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetNoPrimary") self.assertFalse(t.description.has_writable_server()) self.assertFalse(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'tags': {'tag': 'exists'}}) - - self.assertTrue( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: 
False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "tags": {"tag": "exists"}, + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertTrue( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertTrue(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) def test_close(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertTrue(get_monitor(t, 'a').opened) - self.assertTrue(get_monitor(t, 'b').opened) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertTrue(get_monitor(t, "a").opened) + self.assertTrue(get_monitor(t, "b").opened) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) t.close() self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - # A closed topology should not be updated when receiving an isMaster. - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b', 'c']}) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + # A closed topology should not be updated when receiving a hello. + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b", "c"]}, + ) self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) # Server c should not have been added. 
- self.assertEqual(None, get_server(t, 'c')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - def test_reset_server(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - t.reset_server(('a', 27017)) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) - - t.reset_server(('b', 27017)) - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) - - def test_reset_removed_server(self): - t = create_mock_topology(replica_set_name='rs') + self.assertEqual(None, get_server(t, "c")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + def test_handle_error(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("a", 27017), errctx) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + t.handle_error(("b", 27017), errctx) + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + def test_handle_error_removed_server(self): + t = create_mock_topology(replica_set_name="rs") # No error resetting a server not in the TopologyDescription. - t.reset_server(('b', 27017)) + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("b", 27017), errctx) # Server was *not* added as type Unknown. - self.assertFalse(t.has_server(('b', 27017))) + self.assertFalse(t.has_server(("b", 27017))) def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. 
topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=MockMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) # Another response from the primary. Tests the code that processes # primary response when topology type is already ReplicaSetWithPrimary. - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # No change. - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=MockMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) - - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) + + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) def test_wire_version(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") t.description.check_compatible() # No error. - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # Use defaults. 
server = t.get_server_by_address(address) self.assertEqual(server.description.min_wire_version, 0) self.assertEqual(server.description.max_wire_version, 0) - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 1, - 'maxWireVersion': 5}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 1, + "maxWireVersion": 6, + }, + ) self.assertEqual(server.description.min_wire_version, 1) - self.assertEqual(server.description.max_wire_version, 5) + self.assertEqual(server.description.max_wire_version, 6) + t.select_servers(any_server_selector) # Incompatible. - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 11, - 'maxWireVersion': 12}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 22, + "maxWireVersion": 24, + }, + ) try: t.select_servers(any_server_selector) @@ -557,20 +552,25 @@ def test_wire_version(self): # Error message should say which server failed and why. self.assertEqual( str(e), - "Server at a:27017 requires wire version 11, but this version " - "of PyMongo only supports up to %d." - % (common.MAX_SUPPORTED_WIRE_VERSION,)) + "Server at a:27017 requires wire version 22, but this version " + "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") # Incompatible. - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 0, - 'maxWireVersion': 0}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 0, + "maxWireVersion": 0, + }, + ) try: t.select_servers(any_server_selector) @@ -580,126 +580,190 @@ def test_wire_version(self): str(e), "Server at a:27017 reports wire version 0, but this version " "of PyMongo requires at least %d (MongoDB %s)." - % (common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % (common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") def test_max_write_batch_size(self): - t = create_mock_topology(seeds=['a', 'b'], replica_set_name='rs') + t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") def write_batch_size(): s = t.select_server(writable_server_selector) return s.description.max_write_batch_size - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 1}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 2}) + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 1, + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 2, + }, + ) # Uses primary's max batch size. self.assertEqual(1, write_batch_size()) # b becomes primary. 
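Context for the batch-size assertions above: maxWriteBatchSize is advertised in the server's hello response and caps how many operations one write command may carry, so a bulk write is split along the lines of this sketch:

    def batches(ops, max_write_batch_size):
        # Yield successive slices no larger than the advertised cap.
        for i in range(0, len(ops), max_write_batch_size):
            yield ops[i : i + max_write_batch_size]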
- got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 2}) + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 2, + }, + ) self.assertEqual(2, write_batch_size()) + def test_topology_repr(self): + t = create_mock_topology(replica_set_name="rs") + self.addCleanup(t.close) + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "c", "b"]}, + ) + self.assertEqual( + repr(t.description), + f", " + ", " + "]>", + ) + + def test_unexpected_load_balancer(self): + # Note: This behavior should not be reachable in practice but we + # should handle it gracefully nonetheless. See PYTHON-2791. + # Load balancers are included in topology with a single seed. + t = create_mock_topology(seeds=["a"]) + mock_lb_response = { + "ok": 1, + "msg": "isdbgrid", + "serviceId": ObjectId(), + "maxWireVersion": 13, + } + got_hello(t, ("a", 27017), mock_lb_response) + sds = t.description.server_descriptions() + self.assertIn(("a", 27017), sds) + self.assertEqual(sds[("a", 27017)].server_type_name, "LoadBalancer") + self.assertEqual(t.description.topology_type_name, "Single") + self.assertTrue(t.description.has_writable_server()) + + # Load balancers are removed from a topology with multiple seeds. + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), mock_lb_response) + self.assertNotIn(("a", 27017), t.description.server_descriptions()) + self.assertEqual(t.description.topology_type_name, "Unknown") + -def wait_for_master(topology): +def wait_for_primary(topology): """Wait for a Topology to discover a writable server. - If the monitor is currently calling ismaster, a blocking call to + If the monitor is currently calling hello, a blocking call to select_server from this thread can trigger a spurious wake of the monitor thread. In applications this is harmless but it would break some tests, so we pass server_selection_timeout=0 and poll instead. """ - def get_master(): + def get_primary(): try: return topology.select_server(writable_server_selector, 0) except ConnectionFailure: return None - return wait_until(get_master, 'find master') + return wait_until(get_primary, "find primary") class TestTopologyErrors(TopologyTest): - # Errors when calling ismaster. + # Errors when calling hello. def test_pool_reset(self): - # ismaster succeeds at first, then always raises socket error. - ismaster_count = [0] + # hello succeeds at first, then always raises socket error. + hello_count = [0] class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): - ismaster_count[0] += 1 - if ismaster_count[0] == 1: - return IsMaster({'ok': 1, 'maxWireVersion': 6}), 0 + hello_count[0] += 1 + if hello_count[0] == 1: + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) - server = wait_for_master(t) - self.assertEqual(1, ismaster_count[0]) - pool_id = server.pool.pool_id + self.addCleanup(t.close) + server = wait_for_primary(t) + self.assertEqual(1, hello_count[0]) + generation = server.pool.gen.get_overall() - # Pool is reset by ismaster failure. + # Pool is reset by hello failure. 
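The assertion below reflects the switch from pool_id to a generation counter: a reset bumps the pool's generation, and gen.get_overall() reads the current value. The observable contract, sketched with the names from this diff:

    before = server.pool.gen.get_overall()
    # ... a hello check fails, so the monitor marks the server Unknown
    # and resets its connection pool ...
    assert server.pool.gen.get_overall() != before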
t.request_check_all() - self.assertNotEqual(pool_id, server.pool.pool_id) + self.assertNotEqual(generation, server.pool.gen.get_overall()) - def test_ismaster_retry(self): - # ismaster succeeds at first, then raises socket error, then succeeds. - ismaster_count = [0] + def test_hello_retry(self): + # hello succeeds at first, then raises socket error, then succeeds. + hello_count = [0] class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): - ismaster_count[0] += 1 - if ismaster_count[0] in (1, 3): - return IsMaster({'ok': 1, 'maxWireVersion': 6}), 0 + hello_count[0] += 1 + if hello_count[0] in (1, 3): + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect(f"mock monitor error #{hello_count[0]}") t = create_mock_topology(monitor_class=TestMonitor) - server = wait_for_master(t) - self.assertEqual(1, ismaster_count[0]) - self.assertEqual(SERVER_TYPE.Standalone, - server.description.server_type) + self.addCleanup(t.close) + server = wait_for_primary(t) + self.assertEqual(1, hello_count[0]) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) - # Second ismaster call, then immediately the third. + # Second hello call, server is marked Unknown, then the monitor + # immediately runs a retry (third hello). t.request_check_all() - self.assertEqual(3, ismaster_count[0]) - self.assertEqual(SERVER_TYPE.Standalone, get_type(t, 'a')) + # The third hello call (the immediate retry) happens sometime soon + # after the failed check triggered by request_check_all. Wait until + # the server becomes known again. + server = t.select_server(writable_server_selector, 0.250) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + self.assertEqual(3, hello_count[0]) def test_internal_monitor_error(self): - exception = AssertionError('internal error') + exception = AssertionError("internal error") class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): raise exception t = create_mock_topology(monitor_class=TestMonitor) - with self.assertRaisesRegex(ConnectionFailure, 'internal error'): - t.select_server(any_server_selector, - server_selection_timeout=0.5) + self.addCleanup(t.close) + with self.assertRaisesRegex(ConnectionFailure, "internal error"): + t.select_server(any_server_selector, server_selection_timeout=0.5) class TestServerSelectionErrors(TopologyTest): @@ -707,71 +771,83 @@ def assertMessage(self, message, topology, selector=any_server_selector): with self.assertRaises(ConnectionFailure) as context: topology.select_server(selector, server_selection_timeout=0) - self.assertEqual(message, str(context.exception)) + self.assertIn(message, str(context.exception)) def test_no_primary(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) - self.assertMessage('No replica set members match selector "Primary()"', - t, ReadPreference.PRIMARY) + self.assertMessage( + 'No replica set members match selector "Primary()"', t, ReadPreference.PRIMARY + ) - self.assertMessage('No primary available for writes', - t, writable_server_selector) + self.assertMessage("No primary available for writes", t, writable_server_selector) def test_no_secondary(self): - t = 
create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) self.assertMessage( - 'No replica set members match selector' - ' "Secondary(tag_sets=None, max_staleness=-1)"', - t, ReadPreference.SECONDARY) + "No replica set members match selector" + ' "Secondary(tag_sets=None, max_staleness=-1, hedge=None)"', + t, + ReadPreference.SECONDARY, + ) self.assertMessage( "No replica set members match selector" - " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1)\"", - t, Secondary(tag_sets=[{'dc': 'ny'}])) + " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1, " + 'hedge=None)"', + t, + Secondary(tag_sets=[{"dc": "ny"}]), + ) def test_bad_replica_set_name(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'wrong', - 'hosts': ['a']}) - - self.assertMessage( - 'No replica set members available for replica set name "rs"', t) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "wrong", + "hosts": ["a"], + }, + ) + + self.assertMessage('No replica set members available for replica set name "rs"', t) def test_multiple_standalones(self): # Standalones are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) - got_ismaster(t, ('a', 27017), {'ok': 1}) - got_ismaster(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No servers available', t) + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No servers available", t) def test_no_mongoses(self): # Standalones are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) + t = create_mock_topology(seeds=["a", "b"]) # Discover a mongos and change topology type to Sharded. - got_ismaster(t, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'}) + got_hello(t, ("a", 27017), {"ok": 1, "msg": "isdbgrid"}) # Oops, both servers are standalone now. Remove them. - got_ismaster(t, ('a', 27017), {'ok': 1}) - got_ismaster(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No mongoses available', t) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No mongoses available", t) if __name__ == "__main__": diff --git a/test/test_transactions.py b/test/test_transactions.py index 88e6dae5ab..64b93f0b54 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -13,32 +13,47 @@ # limitations under the License. 
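Before the spec-runner plumbing below, the core session and transaction API these tests exercise, as a minimal sketch (connection string hypothetical):

    from pymongo import MongoClient, WriteConcern
    from pymongo.read_concern import ReadConcern

    client = MongoClient("mongodb://localhost:27017/?replicaSet=rs")
    coll = client.pymongo_test.test
    with client.start_session() as session:
        with session.start_transaction(
            read_concern=ReadConcern("snapshot"),
            write_concern=WriteConcern(w="majority"),
        ):
            # Exiting the block cleanly commits; an exception aborts.
            coll.insert_one({"x": 1}, session=session)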
"""Execute Transactions Spec tests.""" +from __future__ import annotations import os import sys +from io import BytesIO sys.path[0:0] = [""] -from pymongo import client_session, WriteConcern +from test import client_context, unittest +from test.utils import ( + OvertCommandListener, + SpecTestCreator, + rs_client, + single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner +from typing import List + +from bson import encode +from bson.raw_bson import RawBSONDocument +from gridfs import GridFS, GridFSBucket +from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - OperationFailure) +from pymongo.command_cursor import CommandCursor +from pymongo.cursor import Cursor +from pymongo.errors import ( + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from test import unittest, client_context -from test.utils import (rs_client, single_client, - wait_until, OvertCommandListener, - TestCreator) -from test.utils_spec_runner import SpecRunner - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") -_TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG') +_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. With 2 mongoses, @@ -50,20 +65,30 @@ class TransactionsBase(SpecRunner): @classmethod def setUpClass(cls): - super(TransactionsBase, cls).setUpClass() + super().setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client('%s:%s' % address)) + cls.mongos_clients.append(single_client("{}:{}".format(*address))) + + @classmethod + def tearDownClass(cls): + for client in cls.mongos_clients: + client.close() + super().tearDownClass() def maybe_skip_scenario(self, test): - super(TransactionsBase, self).maybe_skip_scenario(test) - if ('secondary' in self.id() and - not client_context.is_mongos and - not client_context.has_secondaries): - raise unittest.SkipTest('No secondaries') + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not client_context.is_mongos + and not client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") class TestTransactions(TransactionsBase): + RUN_ON_SERVERLESS = True + @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() @@ -72,25 +97,24 @@ def test_transaction_options_validation(self): self.assertIsNone(default_options.read_preference) self.assertIsNone(default_options.max_commit_time_ms) # No error when valid options are provided. 
- TransactionOptions(read_concern=ReadConcern(), - write_concern=WriteConcern(), - read_preference=ReadPreference.PRIMARY, - max_commit_time_ms=10000) + TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) with self.assertRaisesRegex(TypeError, "read_concern must be "): - TransactionOptions(read_concern={}) + TransactionOptions(read_concern={}) # type: ignore with self.assertRaisesRegex(TypeError, "write_concern must be "): - TransactionOptions(write_concern={}) + TransactionOptions(write_concern={}) # type: ignore with self.assertRaisesRegex( - ConfigurationError, - "transactions do not support unacknowledged write concern"): + ConfigurationError, "transactions do not support unacknowledged write concern" + ): TransactionOptions(write_concern=WriteConcern(w=0)) - with self.assertRaisesRegex( - TypeError, "is not valid for read_preference"): - TransactionOptions(read_preference={}) - with self.assertRaisesRegex( - TypeError, "max_commit_time_ms must be an integer or None"): - TransactionOptions(max_commit_time_ms="10000") - + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): + TransactionOptions(read_preference={}) # type: ignore + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): + TransactionOptions(max_commit_time_ms="10000") # type: ignore @client_context.require_transactions def test_transaction_write_concern_override(self): @@ -103,42 +127,42 @@ def test_transaction_write_concern_override(self): with client.start_session() as s: with s.start_transaction(write_concern=WriteConcern(w=1)): self.assertTrue(coll.insert_one({}, session=s).acknowledged) - self.assertTrue(coll.insert_many( - [{}, {}], session=s).acknowledged) - self.assertTrue(coll.bulk_write( - [InsertOne({})], session=s).acknowledged) - self.assertTrue(coll.replace_one( - {}, {}, session=s).acknowledged) - self.assertTrue(coll.update_one( - {}, {"$set": {"a": 1}}, session=s).acknowledged) - self.assertTrue(coll.update_many( - {}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.insert_many([{}, {}], session=s).acknowledged) + self.assertTrue(coll.bulk_write([InsertOne({})], session=s).acknowledged) + self.assertTrue(coll.replace_one({}, {}, session=s).acknowledged) + self.assertTrue(coll.update_one({}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.update_many({}, {"$set": {"a": 1}}, session=s).acknowledged) self.assertTrue(coll.delete_one({}, session=s).acknowledged) self.assertTrue(coll.delete_many({}, session=s).acknowledged) coll.find_one_and_delete({}, session=s) coll.find_one_and_replace({}, {}, session=s) coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) - unsupported_txn_writes = [ + unsupported_txn_writes: list = [ (client.drop_database, [db.name], {}), - (db.create_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), + (db.drop_collection, ["collection"], {}), (coll.drop, [], {}), - (coll.map_reduce, - ['function() {}', 'function() {}', 'output'], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. 
- (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] + # Creating a collection in a transaction requires MongoDB 4.4+. + if client_context.version < (4, 3, 4): + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) + for op in unsupported_txn_writes: op, args, kwargs = op with client.start_session() as s: - kwargs['session'] = s + kwargs["session"] = s s.start_transaction(write_concern=WriteConcern(w=1)) with self.assertRaises(OperationFailure): op(*args, **kwargs) @@ -149,8 +173,7 @@ def test_transaction_write_concern_override(self): def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. @@ -178,8 +201,7 @@ def test_unpin_for_next_transaction(self): def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. @@ -201,9 +223,180 @@ def test_unpin_for_non_transaction_operation(self): self.assertGreater(len(addresses), 1) + @client_context.require_transactions + @client_context.require_version_min(4, 3, 4) + def test_create_collection(self): + client = client_context.client + db = client.pymongo_test + coll = db.test_create_collection + self.addCleanup(coll.drop) + + # Use with_transaction to avoid StaleConfig errors on sharded clusters. + def create_and_insert(session): + coll2 = db.create_collection(coll.name, session=session) + self.assertEqual(coll, coll2) + coll.insert_one({}, session=session) + + with client.start_session() as s: + s.with_transaction(create_and_insert) + + # Outside a transaction we raise CollectionInvalid on existing colls. + with self.assertRaises(CollectionInvalid): + db.create_collection(coll.name) + + # Inside a transaction we raise the OperationFailure from create. 
+ with client.start_session() as s: + s.start_transaction() + with self.assertRaises(OperationFailure) as ctx: + db.create_collection(coll.name, session=s) + self.assertEqual(ctx.exception.code, 48) # NamespaceExists + + @client_context.require_transactions + def test_gridfs_does_not_support_transactions(self): + client = client_context.client + db = client.pymongo_test + gfs = GridFS(db) + bucket = GridFSBucket(db) + + def gridfs_find(*args, **kwargs): + return gfs.find(*args, **kwargs).next() + + def gridfs_open_upload_stream(*args, **kwargs): + bucket.open_upload_stream(*args, **kwargs).write(b"1") + + gridfs_ops = [ + (gfs.put, (b"123",)), + (gfs.get, (1,)), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), + (gfs.list, ()), + (gfs.find_one, ()), + (gridfs_find, ()), + (gfs.exists, ()), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), + (bucket.delete, (1,)), + (bucket.find, ()), + (bucket.open_download_stream, (1,)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), + ] + + with client.start_session() as s, s.start_transaction(): + for op, args in gridfs_ops: + with self.assertRaisesRegex( + InvalidOperation, + "GridFS does not support multi-document transactions", + ): + op(*args, session=s) # type: ignore -class PatchSessionTimeout(object): + # Require 4.2+ for large (16MB+) transactions. + @client_context.require_version_min(4, 2) + @client_context.require_transactions + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") + def test_transaction_starts_with_batched_write(self): + if "PyPy" in sys.version and client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) + # Start a transaction with a batch of operations that needs to be + # split. + listener = OvertCommandListener() + client = rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + coll.delete_many({}) + listener.reset() + self.addCleanup(client.close) + self.addCleanup(coll.drop) + large_str = "\0" * (1 * 1024 * 1024) + ops: List[InsertOne[RawBSONDocument]] = [ + InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48) + ] + with client.start_session() as session: + with session.start_transaction(): + coll.bulk_write(ops, session=session) # type: ignore[arg-type] + # Assert commands were constructed properly. + self.assertEqual( + ["insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.started_events[0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.started_events[1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) + self.assertEqual(48, coll.count_documents({})) + + @client_context.require_transactions + def test_transaction_direct_connection(self): + client = single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + + # Make sure the collection exists. 
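+        # (Implicit collection creation inside a transaction needs MongoDB 4.4+,
+        # so create the collection up front to keep every operation below
+        # version-agnostic.)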
+ coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + ops = [ + (coll.bulk_write, [[InsertOne[dict]({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (coll.find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (coll.find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + with client.start_session() as s, s.start_transaction(): + res = f(*args, session=s) # type:ignore[operator] + if isinstance(res, (CommandCursor, Cursor)): + list(res) + + +class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" + def __init__(self, mock_timeout): self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT self.mock_timeout = mock_timeout @@ -217,15 +410,18 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'transactions-convenient-api') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" + ) @client_context.require_transactions def test_callback_raises_custom_error(self): - class _MyException(Exception):pass + class _MyException(Exception): + pass def raise_error(_): - raise _MyException() + raise _MyException + with self.client.start_session() as s: with self.assertRaises(_MyException): s.with_transaction(raise_error) @@ -233,51 +429,54 @@ def raise_error(_): @client_context.require_transactions def test_callback_returns_value(self): def callback(_): - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback), 'Foo') + self.assertEqual(s.with_transaction(callback), "Foo") self.db.test.insert_one({}) - def callback(session): + def callback2(session): self.db.test.insert_one({}, session=session) - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback), 'Foo') + self.assertEqual(s.with_transaction(callback2), "Foo") @client_context.require_transactions def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): coll.insert_one({}, session=session) - err = { - 'ok': 0, - 'errmsg': 'Transaction 7819 has been aborted.', - 'code': 251, - 'codeName': 'NoSuchTransaction', - 'errorLabels': ['TransientTransactionError'], + err: dict = { + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], } - raise OperationFailure(err['errmsg'], err['code'], err) + raise OperationFailure(err["errmsg"], err["code"], err) # Create the collection. 
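        # (Context for PatchSessionTimeout(0) below: with_transaction retries the
        # callback, and retries commitTransaction, only while the elapsed time is
        # under _WITH_TRANSACTION_RETRY_TIME_LIMIT, so a zero limit means the
        # first transient error must surface immediately.)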
coll.insert_one({}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'abortTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) @client_context.require_test_commands @client_context.require_transactions def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): @@ -285,29 +484,32 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 1}, - 'data': { - 'failCommands': ['commitTransaction'], - 'errorCode': 251, # NoSuchTransaction - }}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) - listener.results.clear() + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) @client_context.require_test_commands @client_context.require_transactions def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): @@ -315,14 +517,15 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 2}, - 'data': { - 'failCommands': ['commitTransaction'], - 'closeConnection': True}}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) - listener.results.clear() + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): @@ -331,8 +534,9 @@ def callback(session): # One insert for the callback and two commits (includes the automatic # retry). - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction', 'commitTransaction']) + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) # Tested here because this supports Motor's convenient transactions API. 
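    # (Motor builds its convenient transactions API on these same primitives,
    # so in_transaction must behave identically for manual and callback usage.)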
@client_context.require_transactions @@ -365,6 +569,7 @@ def test_in_transaction_property(self): # Using a callback def callback(session): self.assertTrue(session.in_transaction) + with client.start_session() as s: self.assertFalse(s.in_transaction) s.with_transaction(callback) @@ -380,12 +585,13 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestTransactions, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestTransactions, TEST_PATH) test_creator.create_tests() -TestCreator(create_test, TestTransactionsConvenientAPI, - TestTransactionsConvenientAPI.TEST_PATH).create_tests() +SpecTestCreator( + create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH +).create_tests() if __name__ == "__main__": diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py new file mode 100644 index 0000000000..6de4902a81 --- /dev/null +++ b/test/test_transactions_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Transactions unified spec tests.""" +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_typing.py b/test/test_typing.py new file mode 100644 index 0000000000..3d6156ce2c --- /dev/null +++ b/test/test_typing.py @@ -0,0 +1,573 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that each file in mypy_fails/ actually fails mypy, and test some +sample client code that uses PyMongo typings. 
+""" +from __future__ import annotations + +import os +import sys +import tempfile +import unittest +from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union + +try: + from typing_extensions import NotRequired, TypedDict + + from bson import ObjectId + + class Movie(TypedDict): + name: str + year: int + + class MovieWithId(TypedDict): + _id: ObjectId + name: str + year: int + + class ImplicitMovie(TypedDict): + _id: NotRequired[ObjectId] # pyright: ignore[reportGeneralTypeIssues] + name: str + year: int + +except ImportError: + Movie = dict # type:ignore[misc,assignment] + ImplicitMovie = dict # type: ignore[assignment,misc] + MovieWithId = dict # type: ignore[assignment,misc] + TypedDict = None + NotRequired = None # type: ignore[assignment] + + +try: + from mypy import api +except ImportError: + api = None # type: ignore[assignment] + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context +from test.utils import rs_or_single_client + +from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo import ASCENDING, MongoClient +from pymongo.collection import Collection +from pymongo.operations import DeleteOne, InsertOne, ReplaceOne +from pymongo.read_preferences import ReadPreference + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") + + +def get_tests() -> Iterable[str]: + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + yield os.path.join(dirpath, filename) + + +def only_type_check(func): + def inner(*args, **kwargs): + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + func(*args, **kwargs) + + return inner + + +class TestMypyFails(unittest.TestCase): + def ensure_mypy_fails(self, filename: str) -> None: + if api is None: + raise unittest.SkipTest("Mypy is not installed") + stdout, stderr, exit_status = api.run([filename]) + self.assertTrue(exit_status, msg=stdout) + + def test_mypy_failures(self) -> None: + for filename in get_tests(): + if filename == "typeddict_client.py" and TypedDict is None: + continue + with self.subTest(filename=filename): + self.ensure_mypy_fails(filename) + + +class TestPymongo(IntegrationTest): + coll: Collection + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.coll = cls.client.test.test + + def test_insert_find(self) -> None: + doc = {"my": "doc"} + coll2 = self.client.test.test2 + result = self.coll.insert_one(doc) + self.assertEqual(result.inserted_id, doc["_id"]) + retrieved = self.coll.find_one({"_id": doc["_id"]}) + if retrieved: + # Documents returned from find are mutable. 
+ retrieved["new_field"] = 1 + result2 = coll2.insert_one(retrieved) + self.assertEqual(result2.inserted_id, result.inserted_id) + + def test_cursor_iterable(self) -> None: + def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: + return list(iterable) + + self.coll.insert_one({}) + cursor = self.coll.find() + docs = to_list(cursor) + self.assertTrue(docs) + + @only_type_check + def test_bulk_write(self) -> None: + self.coll.insert_one({}) + coll: Collection[Movie] = self.coll + requests: List[InsertOne[Movie]] = [InsertOne(Movie(name="American Graffiti", year=1973))] + self.assertTrue(coll.bulk_write(requests).acknowledged) + new_requests: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [] + input_list: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, Movie(name="American Graffiti", year=1973)), + ] + for i in input_list: + new_requests.append(i) + self.assertTrue(coll.bulk_write(new_requests).acknowledged) + + # Because ReplaceOne is not generic, type checking is not enforced for ReplaceOne in the first example. + @only_type_check + def test_bulk_write_heterogeneous(self): + coll: Collection[Movie] = self.coll + requests: List[Union[InsertOne[Movie], ReplaceOne, DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, {"name": "American Graffiti", "year": "WRONG_TYPE"}), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests).acknowledged) + requests_two: List[Union[InsertOne[Movie], ReplaceOne[Movie], DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne( + {}, + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[typeddict-item] + ), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests_two).acknowledged) + + def test_command(self) -> None: + result: Dict = self.client.admin.command("ping") + result.items() + + def test_list_collections(self) -> None: + cursor = self.client.test.list_collections() + value = cursor.next() + value.items() + + def test_list_databases(self) -> None: + cursor = self.client.list_databases() + value = cursor.next() + value.items() + + def test_default_document_type(self) -> None: + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.test.test + doc = {"my": "doc"} + coll.insert_one(doc) + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + def test_with_transaction(self) -> None: + def execute_transaction(session): + pass + + with self.client.start_session() as session: + return session.with_transaction( + execute_transaction, read_preference=ReadPreference.PRIMARY + ) + + +class TestDecode(unittest.TestCase): + def test_bson_decode(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + rt_document: Dict[str, Any] = decode(bsonbytes) + assert rt_document["_id"] == 1 + rt_document["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options = 
CodecOptions(document_class=MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options) + rt_document2 = decode(bsonbytes2, codec_options=codec_options) + assert rt_document2.foo() == "bar" + + codec_options2 = CodecOptions(document_class=RawBSONDocument) + encode(doc, codec_options=codec_options2) + rt_document3 = decode(bsonbytes2, codec_options=codec_options2) + assert rt_document3.raw + + def test_bson_decode_no_codec_option(self) -> None: + doc = decode_all(encode({"a": 1})) + assert doc + doc[0]["a"] = 2 + + def test_bson_decode_all(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) + assert rt_documents[0]["_id"] == 1 + rt_documents[0]["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_all(bsonbytes2, codec_options2) + assert rt_documents2[0].foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_all(bsonbytes3, codec_options3) + assert rt_documents3[0].raw + + def test_bson_decode_all_no_codec_option(self) -> None: + docs = decode_all(b"") + docs.append({"new": 1}) + + docs = decode_all(encode({"a": 1})) + assert docs + docs[0]["a"] = 2 + docs.append({"new": 1}) + + def test_bson_decode_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_iter(bsonbytes2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_iter(bsonbytes3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_iter_no_codec_option(self) -> None: + doc = next(decode_iter(encode({"a": 1}))) + assert doc + doc["a"] = 2 + + def make_tempfile(self, content: bytes) -> Any: + fileobj = tempfile.TemporaryFile() + fileobj.write(content) + fileobj.seek(0) + self.addCleanup(fileobj.close) + return fileobj + + def test_bson_decode_file_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + fileobj = self.make_tempfile(bsonbytes) + rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + fileobj2 = self.make_tempfile(bsonbytes2) + rt_documents2 = decode_file_iter(fileobj2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) 
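+        # Two documents were concatenated above; decode_file_iter should lazily
+        # yield both RawBSONDocuments when reading them back from a temp file.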
+ fileobj3 = self.make_tempfile(bsonbytes3) + rt_documents3 = decode_file_iter(fileobj3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_file_iter_none_codec_option(self) -> None: + fileobj = self.make_tempfile(encode({"new": 1})) + doc = next(decode_file_iter(fileobj)) + assert doc + doc["a"] = 2 + + +class TestDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient[Dict[str, Any]] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert retrieved["year"] == 1 + assert retrieved["name"] == "a" + + @only_type_check + def test_typeddict_document_type_insertion(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + mov = {"name": "THX-1138", "year": 1971} + movie = Movie(name="THX-1138", year=1971) + coll.insert_one(mov) # type: ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": 1971}) # This will work because it is in-line. + coll.insert_one(movie) + coll.insert_many([mov]) # type: ignore[list-item] + coll.insert_many([movie]) + bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} + bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] + coll.insert_one(bad_mov) # type:ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[typeddict-item] + coll.insert_one(bad_movie) + coll.insert_many([bad_mov]) # type: ignore[list-item] + coll.insert_many( + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[typeddict-item] + ) + coll.insert_many([bad_movie]) + + @only_type_check + def test_bulk_write_document_type_insertion(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [InsertOne(Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [InsertOne(mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + + @only_type_check + def test_bulk_write_document_type_replacement(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [ReplaceOne({}, Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [ReplaceOne({}, mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + + @only_type_check + def test_typeddict_explicit_document_type(self) -> None: + out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. 
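+        # (Strictly, `out` is a MovieWithId here; the point is that "foo" is not
+        # a declared key, so only the type checker, never the runtime, flags it,
+        # and only_type_check skips these tests at runtime anyway.)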
+ assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + # This should work the same as the test above, but this time using NotRequired to allow + # automatic insertion of the _id field by insert_one. + @only_type_check + def test_typeddict_not_required_document_type(self) -> None: + out = ImplicitMovie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore + + @only_type_check + def test_typeddict_empty_document_type(self) -> None: + out = Movie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # This should fail because _id is not included in our TypedDict definition. + assert out["_id"] # type:ignore[typeddict-item] + + @client_context.require_connection + def test_typeddict_find_notrequired(self): + if NotRequired is None or ImplicitMovie is None: + raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") + client: MongoClient[ImplicitMovie] = rs_or_single_client() + coll = client.test.test + coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) + out = coll.find_one({}) + assert out is not None + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client = MongoClient(document_class=RawBSONDocument) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert len(retrieved.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_son_document_type_runtime(self) -> None: + MongoClient(document_class=SON[str, Any], connect=False) + + @only_type_check + def test_create_index(self) -> None: + client: MongoClient[Dict[str, str]] = MongoClient("test") + db = client.test + with client.start_session() as session: + index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) + assert isinstance(index, str) + + +class TestCommandDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + result: Dict = client.admin.command("ping") + result["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Movie] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + assert result["year"] == 1 + assert result["name"] == "a" + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options = CodecOptions(RawBSONDocument) + result = client.admin.command("ping", codec_options=codec_options) + assert len(result.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + codec_options = CodecOptions(SON[str, 
Any]) + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + +class TestCodecOptionsDocumentType(unittest.TestCase): + def test_default(self) -> None: + options: CodecOptions = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_explicit_document_type(self) -> None: + options: CodecOptions[Dict[str, Any]] = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_typeddict_document_type(self) -> None: + options: CodecOptions[Movie] = CodecOptions() + # Suppress: Cannot instantiate type "Type[Movie]". + obj = options.document_class(name="a", year=1) # type: ignore[misc] + assert obj["year"] == 1 + assert obj["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + options = CodecOptions(RawBSONDocument) + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + obj = options.document_class(doc_bson) + assert len(obj.raw) > 0 + + def test_son_document_type(self) -> None: + options = CodecOptions(SON[str, Any]) + obj = options.document_class() + obj["a"] = 1 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py new file mode 100644 index 0000000000..4b03b2bfdf --- /dev/null +++ b/test/test_typing_strict.py @@ -0,0 +1,40 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test typings in strict mode.""" +from __future__ import annotations + +import unittest +from typing import TYPE_CHECKING, Any, Dict + +import pymongo +from pymongo.collection import Collection +from pymongo.database import Database + + +def test_generic_arguments() -> None: + """Ensure known usages of generic arguments pass strict typing""" + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + mongo_client: pymongo.MongoClient[Dict[str, Any]] = pymongo.MongoClient() + mongo_client.drop_database("foo") + mongo_client.get_default_database() + db = mongo_client.get_database("test_db") + db = Database(mongo_client, "test_db") + db.with_options() + db.validate_collection("py_test") + col = db.get_collection("py_test") + col.insert_one({"abc": 123}) + col = Collection(db, "py_test") + col.with_options() diff --git a/test/test_unified_format.py b/test/test_unified_format.py new file mode 100644 index 0000000000..bc6dbcc5c2 --- /dev/null +++ b/test/test_unified_format.py @@ -0,0 +1,91 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
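+# Self-test for the unified test format runner: the spec's valid-pass suites
+# must pass, the valid-fail suites must fail, and MatchEvaluatorUtil's
+# operators ($$unsetOrMatches, $$type) are exercised directly below.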
+from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import MatchEvaluatorUtil, generate_test_classes + +from bson import ObjectId + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unified-test-format") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + RUN_ON_SERVERLESS=False, + ) +) + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + RUN_ON_SERVERLESS=False, + ) +) + + +class TestMatchEvaluatorUtil(unittest.TestCase): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: + self.match_evaluator.match_result(spec, actual) + + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + + def test_type(self): + self.match_evaluator.match_result( + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index d218e2be7f..e1e59eb651 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -13,486 +13,506 @@ # limitations under the License. 
"""Test the pymongo uri_parser module.""" +from __future__ import annotations import copy import sys import warnings +from urllib.parse import quote_plus sys.path[0:0] = [""] -from pymongo.uri_parser import (parse_userinfo, - split_hosts, - split_options, - parse_uri) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.ssl_support import ssl -from pymongo import ReadPreference -from bson.binary import JAVA_LEGACY -from bson.py3compat import string_type, _unicode from test import unittest +from bson.binary import JAVA_LEGACY +from pymongo import ReadPreference +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.uri_parser import parse_uri, parse_userinfo, split_hosts, split_options -class TestURI(unittest.TestCase): +class TestURI(unittest.TestCase): def test_validate_userinfo(self): - self.assertRaises(InvalidURI, parse_userinfo, - 'foo@') - self.assertRaises(InvalidURI, parse_userinfo, - ':password') - self.assertRaises(InvalidURI, parse_userinfo, - 'fo::o:p@ssword') - self.assertRaises(InvalidURI, parse_userinfo, ':') - self.assertTrue(parse_userinfo('user:password')) - self.assertEqual(('us:r', 'p@ssword'), - parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us+er:p+ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us%20er:p%20ssword')) - self.assertEqual(('us+er', 'p+ssword'), - parse_userinfo('us%2Ber:p%2Bssword')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM:')) + self.assertRaises(InvalidURI, parse_userinfo, "foo@") + self.assertRaises(InvalidURI, parse_userinfo, ":password") + self.assertRaises(InvalidURI, parse_userinfo, "fo::o:p@ssword") + self.assertRaises(InvalidURI, parse_userinfo, ":") + self.assertTrue(parse_userinfo("user:password")) + self.assertEqual(("us:r", "p@ssword"), parse_userinfo("us%3Ar:p%40ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us+er:p+ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us%20er:p%20ssword")) + self.assertEqual(("us+er", "p+ssword"), parse_userinfo("us%2Ber:p%2Bssword")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM:")) def test_split_hosts(self): - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,') - self.assertRaises(ConfigurationError, split_hosts, - ',localhost:27017') - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,,localhost:27018') - self.assertEqual([('localhost', 27017), ('example.com', 27017)], - split_hosts('localhost,example.com')) - self.assertEqual([('localhost', 27018), ('example.com', 27019)], - split_hosts('localhost:27018,example.com:27019')) - self.assertEqual([('/tmp/mongodb-27017.sock', None)], - split_hosts('/tmp/mongodb-27017.sock')) - self.assertEqual([('/tmp/mongodb-27017.sock', None), - ('example.com', 27017)], - split_hosts('/tmp/mongodb-27017.sock,' - 'example.com:27017')) - self.assertEqual([('example.com', 27017), - ('/tmp/mongodb-27017.sock', None)], - split_hosts('example.com:27017,' - '/tmp/mongodb-27017.sock')) - self.assertRaises(ValueError, split_hosts, '::1', 27017) - self.assertRaises(ValueError, split_hosts, '[::1:27017') - self.assertRaises(ValueError, split_hosts, '::1') - self.assertRaises(ValueError, split_hosts, '::1]:27017') - self.assertEqual([('::1', 27017)], split_hosts('[::1]:27017')) - 
self.assertEqual([('::1', 27017)], split_hosts('[::1]')) + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,") + self.assertRaises(ConfigurationError, split_hosts, ",localhost:27017") + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,,localhost:27018") + self.assertEqual( + [("localhost", 27017), ("example.com", 27017)], split_hosts("localhost,example.com") + ) + self.assertEqual( + [("localhost", 27018), ("example.com", 27019)], + split_hosts("localhost:27018,example.com:27019"), + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None)], split_hosts("/tmp/mongodb-27017.sock") + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], + split_hosts("/tmp/mongodb-27017.sock,example.com:27017"), + ) + self.assertEqual( + [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], + split_hosts("example.com:27017,/tmp/mongodb-27017.sock"), + ) + self.assertRaises(ValueError, split_hosts, "::1", 27017) + self.assertRaises(ValueError, split_hosts, "[::1:27017") + self.assertRaises(ValueError, split_hosts, "::1") + self.assertRaises(ValueError, split_hosts, "::1]:27017") + self.assertEqual([("::1", 27017)], split_hosts("[::1]:27017")) + self.assertEqual([("::1", 27017)], split_hosts("[::1]")) def test_split_options(self): - self.assertRaises(ConfigurationError, split_options, 'foo') - self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo') - self.assertTrue(split_options('ssl=true')) - self.assertTrue(split_options('connect=true')) - self.assertTrue(split_options('ssl_match_hostname=true')) + self.assertRaises(ConfigurationError, split_options, "foo") + self.assertRaises(ConfigurationError, split_options, "foo=bar;foo") + self.assertTrue(split_options("ssl=true")) + self.assertTrue(split_options("connect=true")) + self.assertTrue(split_options("tlsAllowInvalidHostnames=false")) # Test Invalid URI options that should throw warnings. 
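        # With warn=True the parser reports bad options as warnings instead of
        # raising; warnings.filterwarnings("error") escalates each warning so
        # assertRaises can catch it.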
with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, split_options, - 'foo=bar', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'ssl=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connect=foo', warn=True) - self.assertRaises(Warning, split_options, - 'ssl_match_hostname=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=inf', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-inf', warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=foo', - warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=5.5', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=foo', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=5.5', - warn=True) - self.assertRaises(Warning, - split_options, 'authMechanism=foo', - warn=True) + warnings.filterwarnings("error") + self.assertRaises(Warning, split_options, "foo=bar", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=1e100000", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-1e100000", warn=True) + self.assertRaises(Warning, split_options, "ssl=foo", warn=True) + self.assertRaises(Warning, split_options, "connect=foo", warn=True) + self.assertRaises(Warning, split_options, "tlsAllowInvalidHostnames=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=inf", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-inf", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=foo", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=5.5", warn=True) + self.assertRaises(Warning, split_options, "fsync=foo", warn=True) + self.assertRaises(Warning, split_options, "fsync=5.5", warn=True) + self.assertRaises(Warning, split_options, "authMechanism=foo", warn=True) # Test invalid options with warn=False. 
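        # Without warn=True the same inputs raise immediately: an unrecognized
        # option name is a ConfigurationError, while a known option with an
        # unparsable value is a ValueError.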
- self.assertRaises(ConfigurationError, split_options, 'foo=bar') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=1e100000') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=-1e100000') - self.assertRaises(ValueError, split_options, 'ssl=foo') - self.assertRaises(ValueError, split_options, 'connect=foo') - self.assertRaises(ValueError, split_options, 'ssl_match_hostname=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=inf') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=-inf') - self.assertRaises(ValueError, split_options, 'wtimeoutms=foo') - self.assertRaises(ValueError, split_options, 'wtimeoutms=5.5') - self.assertRaises(ValueError, split_options, 'fsync=foo') - self.assertRaises(ValueError, split_options, 'fsync=5.5') - self.assertRaises(ValueError, - split_options, 'authMechanism=foo') + self.assertRaises(ConfigurationError, split_options, "foo=bar") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=1e100000") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-1e100000") + self.assertRaises(ValueError, split_options, "ssl=foo") + self.assertRaises(ValueError, split_options, "connect=foo") + self.assertRaises(ValueError, split_options, "tlsAllowInvalidHostnames=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=inf") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-inf") + self.assertRaises(ValueError, split_options, "wtimeoutms=foo") + self.assertRaises(ValueError, split_options, "wtimeoutms=5.5") + self.assertRaises(ValueError, split_options, "fsync=foo") + self.assertRaises(ValueError, split_options, "fsync=5.5") + self.assertRaises(ValueError, split_options, "authMechanism=foo") # Test splitting options works when valid. 
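        # Note the normalization applied on success: option keys are case-folded
        # and millisecond values become seconds, e.g. (illustrative value)
        # split_options("connectTimeoutMS=250") -> {"connecttimeoutms": 0.25}.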
- self.assertTrue(split_options('socketTimeoutMS=300')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.3}, - split_options('socketTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.0001}, - split_options('socketTimeoutMS=0.1')) - self.assertEqual({'connecttimeoutms': 0.3}, - split_options('connectTimeoutMS=300')) - self.assertEqual({'connecttimeoutms': 0.0001}, - split_options('connectTimeoutMS=0.1')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertTrue(isinstance(split_options('w=5')['w'], int)) - self.assertTrue(isinstance(split_options('w=5.5')['w'], string_type)) - self.assertTrue(split_options('w=foo')) - self.assertTrue(split_options('w=majority')) - self.assertTrue(split_options('wtimeoutms=500')) - self.assertEqual({'fsync': True}, split_options('fsync=true')) - self.assertEqual({'fsync': False}, split_options('fsync=false')) - self.assertEqual({'authmechanism': 'GSSAPI'}, - split_options('authMechanism=GSSAPI')) - self.assertEqual({'authmechanism': 'MONGODB-CR'}, - split_options('authMechanism=MONGODB-CR')) - self.assertEqual({'authmechanism': 'SCRAM-SHA-1'}, - split_options('authMechanism=SCRAM-SHA-1')) - self.assertEqual({'authsource': 'foobar'}, - split_options('authSource=foobar')) - self.assertEqual({'maxpoolsize': 50}, split_options('maxpoolsize=50')) + self.assertTrue(split_options("socketTimeoutMS=300")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.3}, split_options("socketTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.0001}, split_options("socketTimeoutMS=0.1")) + self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) + self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertTrue(isinstance(split_options("w=5")["w"], int)) + self.assertTrue(isinstance(split_options("w=5.5")["w"], str)) + self.assertTrue(split_options("w=foo")) + self.assertTrue(split_options("w=majority")) + self.assertTrue(split_options("wtimeoutms=500")) + self.assertEqual({"fsync": True}, split_options("fsync=true")) + self.assertEqual({"fsync": False}, split_options("fsync=false")) + self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) + self.assertEqual({"authmechanism": "MONGODB-CR"}, split_options("authMechanism=MONGODB-CR")) + self.assertEqual( + {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + ) + self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) + self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") - self.assertRaises(ValueError, - parse_uri, "mongodb://::1", 27017) - - orig = { - 'nodelist': [("localhost", 27017)], - 'username': None, - 'password': None, - 'database': None, - 'collection': None, - 'options': {}, - 'fqdn': None + self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) + + # Extra whitespace should be visible in error message. 
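+        # The stray space ends up in the port field ("27017 "); quoting the raw
+        # value in the error makes the otherwise-invisible whitespace obvious.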
+ with self.assertRaisesRegex(ValueError, "'27017 '"): + parse_uri("mongodb://localhost:27017 ") + + orig: dict = { + "nodelist": [("localhost", 27017)], + "username": None, + "password": None, + "database": None, + "collection": None, + "options": {}, + "fqdn": None, } - res = copy.deepcopy(orig) + res: dict = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost")) - res.update({'username': 'fred', 'password': 'foobar'}) + res.update({"username": "fred", "password": "foobar"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost")) - res.update({'database': 'baz'}) + res.update({"database": "baz"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/baz")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017," - "example2.com:27017")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + self.assertEqual(res, parse_uri("mongodb://example1.com:27017,example2.com:27017")) res = copy.deepcopy(orig) - res['nodelist'] = [("localhost", 27017), - ("localhost", 27018), - ("localhost", 27019)] - self.assertEqual(res, - parse_uri("mongodb://localhost," - "localhost:27018,localhost:27019")) + res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] + self.assertEqual(res, parse_uri("mongodb://localhost,localhost:27018,localhost:27019")) res = copy.deepcopy(orig) - res['database'] = 'foo' + res["database"] = "foo" self.assertEqual(res, parse_uri("mongodb://localhost/foo")) res = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost/")) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, parse_uri("mongodb://" - "localhost/test.yield_historical.in")) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://localhost/test.yield_historical.in")) - res.update({'username': 'fred', 'password': 'foobar'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017,example2.com" - ":27017/test.yield_historical.in")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri("mongodb://example1.com:27017,example2.com:27017/test.yield_historical.in"), + ) # Test socket path without escaped characters. - self.assertRaises(InvalidURI, parse_uri, - "mongodb:///tmp/mongodb-27017.sock") + self.assertRaises(InvalidURI, parse_uri, "mongodb:///tmp/mongodb-27017.sock") # Test with escaped characters. 
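        # %2F is the percent-encoding of "/", so Unix domain socket paths can be
        # embedded in the host list; the unescaped form was rejected just above.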
res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017), - ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, - parse_uri("mongodb://example2.com," - "%2Ftmp%2Fmongodb-27017.sock")) + res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] + self.assertEqual(res, parse_uri("mongodb://example2.com,%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("shoe.sock.pants.co.uk", 27017), - ("/tmp/mongodb-27017.sock", None)] - res['database'] = "nethers_db" - self.assertEqual(res, - parse_uri("mongodb://shoe.sock.pants.co.uk," - "%2Ftmp%2Fmongodb-27017.sock/nethers_db")) + res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] + res["database"] = "nethers_db" + self.assertEqual( + res, + parse_uri("mongodb://shoe.sock.pants.co.uk,%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017" - "/test.yield_historical.in")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017" + "/test.yield_historical.in" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017/test.yield_historical" - ".sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017/test.yield_historical" + ".sock" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://example2.com:27017" - "/test.yield_historical.sock")) + res["nodelist"] = [("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual(res, parse_uri("mongodb://example2.com:27017/test.yield_historical.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None)] - res.update({'database': 'test', 'collection': 'mongodb-27017.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" - "/test.mongodb-27017.sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] + res.update({"database": "test", "collection": "mongodb-27017.sock"}) + self.assertEqual( + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock/test.mongodb-27017.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [('/tmp/mongodb-27020.sock', None), - ("::1", 27017), - ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), - ("192.168.0.212", 27019), - ("localhost", 27018)] - self.assertEqual(res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27020.sock" - ",[::1]:27017,[2001:0db8:" - "85a3:0000:0000:8a2e:0370:7334]," - "192.168.0.212:27019,localhost", - 27018)) + res["nodelist"] = [ + ("/tmp/mongodb-27020.sock", None), + ("::1", 27017), + 
("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), + ("192.168.0.212", 27019), + ("localhost", 27018), + ] + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27020.sock" + ",[::1]:27017,[2001:0db8:" + "85a3:0000:0000:8a2e:0370:7334]," + "192.168.0.212:27019,localhost", + 27018, + ), + ) res = copy.deepcopy(orig) - res.update({'username': 'fred', 'password': 'foobar'}) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) - res['database'] = 'test' - res['collection'] = 'name/with "delimiters' - self.assertEqual( - res, parse_uri("mongodb://localhost/test.name/with \"delimiters")) + res["database"] = "test" + res["collection"] = 'name/with "delimiters' + self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode - } - self.assertEqual(res, parse_uri( - "mongodb://localhost/?readPreference=secondary")) + res["options"] = {"readpreference": ReadPreference.SECONDARY.mongos_mode} + self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = 'password' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "password" + self.assertEqual( + res, parse_uri("mongodb://user:password@localhost/?authMechanism=MONGODB-CR") + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR', 'authsource': 'bar'} - res['username'] = 'user' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/foo" - "?authSource=bar;authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR", "authsource": "bar"} + res["username"] = "user" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=MONGODB-CR" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = '' - self.assertEqual(res, - parse_uri("mongodb://user:@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "" + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=MONGODB-CR")) res = copy.deepcopy(orig) - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - 
self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri("mongodb://user%40domain.com:password@localhost/foo?authMechanism=GSSAPI"), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = '' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "" + res["database"] = "foo" + self.assertEqual( + res, parse_uri("mongodb://user%40domain.com@localhost/foo?authMechanism=GSSAPI") + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website" + ), + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'}, - {} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + {}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website&" - "readpreferencetags=")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website&" + "readpreferencetags=" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'uuidrepresentation': JAVA_LEGACY} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=" - "javaLegacy")) + res["options"] = {"uuidrepresentation": JAVA_LEGACY} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=" + 
"javaLegacy" + ), + ) with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption", - warn=True) - self.assertRaises(ValueError, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption") + warnings.filterwarnings("error") + self.assertRaises( + Warning, + parse_uri, + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=notAnOption", + warn=True, + ) + self.assertRaises( + ValueError, + parse_uri, + "mongodb://user%40domain.com:password@localhost/foo?uuidrepresentation=notAnOption", + ) def test_parse_ssl_paths(self): # Turn off "validate" since these paths don't exist on filesystem. self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'ssl_certfile': '/a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "/a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?ssl_certfile=/a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b", + validate=False, + ), + ) self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'ssl_certfile': 'a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?ssl_certfile=a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b", + validate=False, + ), + ) def test_tlsinsecure_simple(self): # check that tlsInsecure is expanded correctly. + self.maxDiff = None uri = "mongodb://example.com/?tlsInsecure=true" res = { - "ssl_match_hostname": False, "ssl_cert_reqs": ssl.CERT_NONE, - "tlsinsecure": True} + "tlsAllowInvalidHostnames": True, + "tlsAllowInvalidCertificates": True, + "tlsInsecure": True, + "tlsDisableOCSPEndpointCheck": True, + } self.assertEqual(res, parse_uri(uri)["options"]) - def test_tlsinsecure_legacy_conflict(self): - # must not allow use of tlsinsecure alongside legacy TLS options. - # same check for modern TLS options is performed in the spec-tests. - uri = "mongodb://srv.com/?tlsInsecure=true&ssl_match_hostname=true" - with self.assertRaises(InvalidURI): - parse_uri(uri, validate=False, warn=False, normalize=False) - def test_normalize_options(self): # check that options are converted to their internal names correctly. 
- uri = ("mongodb://example.com/?tls=true&appname=myapp&maxPoolSize=10&" - "fsync=true&wtimeout=10") - res = { - "ssl": True, "appname": "myapp", "maxpoolsize": 10, - "fsync": True, "wtimeoutms": 10} + uri = "mongodb://example.com/?ssl=true&appname=myapp" + res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) - def test_waitQueueMultiple_deprecated(self): - uri = "mongodb://example.com/?waitQueueMultiple=5" - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('always') + def test_unquote_after_parsing(self): + quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" + unquoted_val = "val!@#$%^&*()_+,: etc" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val + ) + res = parse_uri(uri) + options = { + "authmechanism": "MONGODB-AWS", + "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, + } + self.assertEqual(options, res["options"]) + + uri = ( + "mongodb://localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west," + quoted_val + ":" + quoted_val + "&" + "readpreferencetags=dc:east,use:" + quoted_val + ) + res = parse_uri(uri) + options = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", unquoted_val: unquoted_val}, + {"dc": "east", "use": unquoted_val}, + ], + } + self.assertEqual(options, res["options"]) + + def test_redact_AWS_SESSION_TOKEN(self): + unquoted_colon = "token:" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + unquoted_colon + ) + with self.assertRaisesRegex( + ValueError, + "auth mechanism properties must be key:value pairs like " + "SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:" + ", did you forget to percent-escape the token with " + "quote_plus?", + ): parse_uri(uri) - self.assertEqual(len(ctx), 1) - self.assertTrue(issubclass(ctx[0].category, DeprecationWarning)) + def test_special_chars(self): + user = "user@ /9+:?~!$&'()*+,;=" + pwd = "pwd@ /9+:?~!$&'()*+,;=" + uri = f"mongodb://{quote_plus(user)}:{quote_plus(pwd)}@localhost" + res = parse_uri(uri) + self.assertEqual(user, res["username"]) + self.assertEqual(pwd, res["password"]) if __name__ == "__main__": diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 9bc7cb178a..ad48fe787c 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -13,7 +13,9 @@ # limitations under the License. """Test that the pymongo.uri_parser module is compliant with the connection -string and uri options specifications.""" +string and uri options specifications. 
+""" +from __future__ import annotations import json import os @@ -22,22 +24,49 @@ sys.path[0:0] = [""] -from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate -from pymongo.compression_support import _HAVE_SNAPPY -from pymongo.uri_parser import parse_uri from test import clear_warning_registry, unittest +from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate +from pymongo.compression_support import _HAVE_SNAPPY +from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.uri_parser import SRV_SCHEME, parse_uri CONN_STRING_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('connection_string', 'test')) + os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") +) -URI_OPTIONS_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'uri_options') +URI_OPTIONS_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "uri_options") TEST_DESC_SKIP_LIST = [ "Valid options specific to single-threaded drivers are parsed correctly", - "Invalid serverSelectionTryOnce causes a warning"] + "Invalid serverSelectionTryOnce causes a warning", + "tlsDisableCertificateRevocationCheck can be set to true", + "tlsDisableCertificateRevocationCheck can be set to false", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", + 
"tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", +] class TestAllScenarios(unittest.TestCase): @@ -45,9 +74,8 @@ def setUp(self): clear_warning_registry() -def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ( - "Expected" if expected else "Unexpected", artefact, "%s") +def get_error_message_template(expected, artifact): + return "{} {} for test '{}'".format("Expected" if expected else "Unexpected", artifact, "%s") def run_scenario_in_dir(target_workdir): @@ -57,87 +85,111 @@ def modified_test_scenario(*args, **kwargs): os.chdir(target_workdir) func(*args, **kwargs) os.chdir(original_workdir) + return modified_test_scenario + return workdir_context_decorator def create_test(test, test_workdir): def run_scenario(self): - compressors = (test.get('options') or {}).get('compressors', []) - if 'snappy' in compressors and not _HAVE_SNAPPY: - self.skipTest('This test needs the snappy module.') - + compressors = (test.get("options") or {}).get("compressors", []) + if "snappy" in compressors and not _HAVE_SNAPPY: + self.skipTest("This test needs the snappy module.") + if test["uri"].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: + self.skipTest("This test needs dnspython package.") valid = True warning = False + expected_warning = test.get("warning", False) + expected_valid = test.get("valid", True) with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('always') + warnings.simplefilter("ignore", category=ResourceWarning) try: - options = parse_uri(test['uri'], warn=True) + options = parse_uri(test["uri"], warn=True) except Exception: valid = False else: warning = len(ctx) > 0 + if expected_valid and warning and not expected_warning: + raise ValueError("Got unexpected warning(s): ", [str(i) for i in ctx]) - expected_valid = test.get('valid', True) self.assertEqual( - valid, expected_valid, get_error_message_template( - not expected_valid, "error") % test['description']) + valid, + expected_valid, + get_error_message_template(not expected_valid, "error") % test["description"], + ) if expected_valid: - expected_warning = test.get('warning', False) self.assertEqual( - warning, expected_warning, get_error_message_template( - expected_warning, "warning") % test['description']) + warning, + expected_warning, + get_error_message_template(expected_warning, "warning") % test["description"], + ) # Compare hosts and port. - if test['hosts'] is not None: + if test["hosts"] is not None: self.assertEqual( - len(test['hosts']), len(options['nodelist']), - "Incorrect number of hosts parsed from URI") - - for exp, actual in zip(test['hosts'], - options['nodelist']): - self.assertEqual(exp['host'], actual[0], - "Expected host %s but got %s" - % (exp['host'], actual[0])) - if exp['port'] is not None: - self.assertEqual(exp['port'], actual[1], - "Expected port %s but got %s" - % (exp['port'], actual)) + len(test["hosts"]), + len(options["nodelist"]), + "Incorrect number of hosts parsed from URI", + ) + + for exp, actual in zip(test["hosts"], options["nodelist"]): + self.assertEqual( + exp["host"], + actual[0], + "Expected host {} but got {}".format(exp["host"], actual[0]), + ) + if exp["port"] is not None: + self.assertEqual( + exp["port"], + actual[1], + "Expected port {} but got {}".format(exp["port"], actual), + ) # Compare auth options. 
- auth = test['auth'] + auth = test["auth"] if auth is not None: - auth['database'] = auth.pop('db') # db == database + auth["database"] = auth.pop("db") # db == database # Special case for PyMongo's collection parsing. - if options.get('collection') is not None: - options['database'] += "." + options['collection'] + if options.get("collection") is not None: + options["database"] += "." + options["collection"] for elm in auth: if auth[elm] is not None: - self.assertEqual(auth[elm], options[elm], - "Expected %s but got %s" - % (auth[elm], options[elm])) + # We have to do this because while the spec requires + # "+"->"+", unquote_plus does "+"->" " + options[elm] = options[elm].replace(" ", "+") + self.assertEqual( + auth[elm], + options[elm], + f"Expected {auth[elm]} but got {options[elm]}", + ) # Compare URI options. err_msg = "For option %s expected %s but got %s" - if test['options']: - opts = options['options'] - for opt in test['options']: + if test["options"]: + opts = options["options"] + for opt in test["options"]: lopt = opt.lower() optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) if opts.get(optname) is not None: - if opts[optname] == test['options'][opt]: - expected_value = test['options'][opt] + if opts[optname] == test["options"][opt]: + expected_value = test["options"][opt] else: - expected_value = validate( - lopt, test['options'][opt])[1] + expected_value = validate(lopt, test["options"][opt])[1] self.assertEqual( - opts[optname], expected_value, - err_msg % (opt, expected_value, opts[optname],)) + opts[optname], + expected_value, + err_msg + % ( + opt, + expected_value, + opts[optname], + ), + ) else: - self.fail( - "Missing expected option %s" % (opt,)) + self.fail(f"Missing expected option {opt}") return run_scenario_in_dir(test_workdir)(run_scenario) @@ -145,26 +197,29 @@ def run_scenario(self): def create_tests(test_path): for dirpath, _, filenames in os.walk(test_path): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: - if not filename.endswith('.json'): + if not filename.endswith(".json"): # skip everything that is not a test specification continue - with open(os.path.join(dirpath, filename)) as scenario_stream: + json_path = os.path.join(dirpath, filename) + with open(json_path, encoding="utf-8") as scenario_stream: scenario_def = json.load(scenario_stream) - for testcase in scenario_def['tests']: - dsc = testcase['description'] + for testcase in scenario_def["tests"]: + dsc = testcase["description"] if dsc in TEST_DESC_SKIP_LIST: print("Skipping test '%s'" % dsc) continue testmethod = create_test(testcase, dirpath) - testname = 'test_%s_%s_%s' % ( - dirname, os.path.splitext(filename)[0], - str(dsc).replace(' ', '_')) + testname = "test_{}_{}_{}".format( + dirname, + os.path.splitext(filename)[0], + str(dsc).replace(" ", "_"), + ) testmethod.__name__ = testname setattr(TestAllScenarios, testmethod.__name__, testmethod) diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py new file mode 100644 index 0000000000..cb25c3f66b --- /dev/null +++ b/test/test_versioned_api.py @@ -0,0 +1,108 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import OvertCommandListener, rs_or_single_client + +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi, ServerApiVersion + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApi(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + + def test_server_api_defaults(self): + api = ServerApi(ServerApiVersion.V1) + self.assertEqual(api.version, "1") + self.assertIsNone(api.strict) + self.assertIsNone(api.deprecation_errors) + + def test_server_api_explicit_false(self): + api = ServerApi("1", strict=False, deprecation_errors=False) + self.assertEqual(api.version, "1") + self.assertFalse(api.strict) + self.assertFalse(api.deprecation_errors) + + def test_server_api_strict(self): + api = ServerApi("1", strict=True, deprecation_errors=True) + self.assertEqual(api.version, "1") + self.assertTrue(api.strict) + self.assertTrue(api.deprecation_errors) + + def test_server_api_validation(self): + with self.assertRaises(ValueError): + ServerApi("2") + with self.assertRaises(TypeError): + ServerApi("1", strict="not-a-bool") # type: ignore[arg-type] + with self.assertRaises(TypeError): + ServerApi("1", deprecation_errors="not-a-bool") # type: ignore[arg-type] + with self.assertRaises(TypeError): + MongoClient(server_api="not-a-ServerApi") + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertNoServerApi(self, event): + self.assertNotIn("apiVersion", event.command) + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @client_context.require_version_min(4, 7) + def test_command_options(self): + listener = OvertCommandListener() + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + self.addCleanup(client.close) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + list(coll.find(batch_size=25)) + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @client_context.require_version_min(4, 7) + @client_context.require_transactions + def test_command_options_txn(self): + listener = OvertCommandListener() + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + self.addCleanup(client.close) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + coll.insert_many([{} for _ in range(100)], session=s) + list(coll.find(batch_size=25, session=s)) + client.test.command("find", "test", session=s) + 
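+        # Every command captured above, including those sent within the
+        # transaction, should have carried apiVersion "1".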
self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_write_concern.py b/test/test_write_concern.py index c6b803618a..e22c7e7a8c 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -13,14 +13,21 @@ # limitations under the License. """Run the unit tests for WriteConcern.""" +from __future__ import annotations import collections import unittest +from pymongo.errors import ConfigurationError from pymongo.write_concern import WriteConcern class TestWriteConcern(unittest.TestCase): + def test_invalid(self): + # Can't use fsync and j options together + self.assertRaises(ConfigurationError, WriteConcern, j=True, fsync=True) + # Can't use w=0 and j options together + self.assertRaises(ConfigurationError, WriteConcern, w=0, j=True) def test_equality(self): concern = WriteConcern(j=True, wtimeout=3000) @@ -31,12 +38,10 @@ def test_equality_to_none(self): concern = WriteConcern() self.assertNotEqual(concern, None) # Explicitly use the != operator. - self.assertTrue(concern != None) # noqa + self.assertTrue(concern != None) # noqa: E711 def test_equality_compatible_type(self): - - class _FakeWriteConcern(object): - + class _FakeWriteConcern: def __init__(self, **document): self.document = document @@ -59,9 +64,9 @@ def __ne__(self, other): self.assertNotEqual(WriteConcern(wtimeout=42), _FakeWriteConcern(wtimeout=2000)) def test_equality_incompatible_type(self): - _fake_type = collections.namedtuple('NotAWriteConcern', ['document']) - self.assertNotEqual(WriteConcern(j=True), _fake_type({'j': True})) + _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore + self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/transactions-convenient-api/callback-retry.json b/test/transactions-convenient-api/callback-retry.json index ed36434452..a0391c1b5d 100644 --- a/test/transactions-convenient-api/callback-retry.json +++ b/test/transactions-convenient-api/callback-retry.json @@ -235,7 +235,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } } ], diff --git a/test/transactions-convenient-api/commit-retry.json b/test/transactions-convenient-api/commit-retry.json index d4b948ce1a..02e38460d0 100644 --- a/test/transactions-convenient-api/commit-retry.json +++ b/test/transactions-convenient-api/commit-retry.json @@ -293,7 +293,7 @@ } }, { - "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotMaster)", + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -304,6 +304,9 @@ "commitTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, diff --git a/test/transactions/abort.json b/test/transactions/legacy/abort.json similarity index 99% rename from test/transactions/abort.json rename to test/transactions/legacy/abort.json index 821a15afbe..3729a98298 100644 --- a/test/transactions/abort.json +++ b/test/transactions/legacy/abort.json @@ -458,7 +458,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } }, { diff --git a/test/transactions/bulk.json b/test/transactions/legacy/bulk.json similarity index 97% rename from 
test/transactions/bulk.json rename to test/transactions/legacy/bulk.json index ea4571c1d5..8a9793b8b3 100644 --- a/test/transactions/bulk.json +++ b/test/transactions/legacy/bulk.json @@ -304,9 +304,7 @@ "$set": { "x": 1 } - }, - "multi": false, - "upsert": false + } }, { "q": { @@ -317,7 +315,6 @@ "x": 2 } }, - "multi": false, "upsert": true } ], @@ -379,9 +376,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } }, { "q": { @@ -389,9 +384,7 @@ }, "u": { "y": 2 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -454,8 +447,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, diff --git a/test/transactions/causal-consistency.json b/test/transactions/legacy/causal-consistency.json similarity index 94% rename from test/transactions/causal-consistency.json rename to test/transactions/legacy/causal-consistency.json index f1ca3d83a8..0e81bf2ff2 100644 --- a/test/transactions/causal-consistency.json +++ b/test/transactions/legacy/causal-consistency.json @@ -40,8 +40,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -65,8 +64,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -93,9 +91,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -123,9 +119,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -212,8 +206,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -260,9 +253,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, diff --git a/test/transactions/commit.json b/test/transactions/legacy/commit.json similarity index 100% rename from test/transactions/commit.json rename to test/transactions/legacy/commit.json diff --git a/test/transactions/count.json b/test/transactions/legacy/count.json similarity index 100% rename from test/transactions/count.json rename to test/transactions/legacy/count.json diff --git a/test/transactions/legacy/create-collection.json b/test/transactions/legacy/create-collection.json new file mode 100644 index 0000000000..9071c59c41 --- /dev/null +++ b/test/transactions/legacy/create-collection.json @@ -0,0 +1,204 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "explicitly create collection using create command", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "create": "test", + "lsid": "session0", + "txnNumber": 
{ + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "create", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + }, + { + "description": "implicitly create collection using insert", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + } + ] +} diff --git a/test/transactions/legacy/create-index.json b/test/transactions/legacy/create-index.json new file mode 100644 index 0000000000..2ff09c9288 --- /dev/null +++ b/test/transactions/legacy/create-index.json @@ -0,0 +1,237 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "create index on a non-existing collection", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + 
} + }, + { + "command_started_event": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "createIndexes", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + }, + { + "description": "create index on a collection created within the same transaction", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "create": "test", + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "create", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": "session0", + "writeConcern": null + }, + "command_name": "createIndexes", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + } + ] +} diff --git a/test/transactions/delete.json b/test/transactions/legacy/delete.json similarity index 100% rename from test/transactions/delete.json rename to test/transactions/legacy/delete.json diff --git a/test/transactions/legacy/error-labels-blockConnection.json b/test/transactions/legacy/error-labels-blockConnection.json new file mode 100644 index 0000000000..56b646f7ad --- /dev/null +++ b/test/transactions/legacy/error-labels-blockConnection.json @@ -0,0 +1,159 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "clientOptions": { + "socketTimeoutMS": 100 + }, + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0", + "result": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/transactions/error-labels.json b/test/transactions/legacy/error-labels.json similarity index 91% rename from test/transactions/error-labels.json rename to test/transactions/legacy/error-labels.json index 3e2451ade8..0be19c731c 100644 --- a/test/transactions/error-labels.json +++ b/test/transactions/legacy/error-labels.json @@ -10,7 +10,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -42,7 +43,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } }, { @@ -101,7 +103,7 @@ } }, { - "description": "NotMaster errors contain transient label", + "description": "NotWritablePrimary errors contain transient label", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -133,6 +135,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -222,6 +225,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -311,6 +315,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -407,6 +412,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", 
"UnknownTransactionCommitResult" ] } @@ -460,7 +466,7 @@ } }, { - "description": "add transient label to connection errors", + "description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -495,6 +501,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -510,6 +517,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -532,6 +540,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -548,6 +557,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -662,7 +672,7 @@ } }, { - "description": "add unknown commit label to connection errors", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -698,6 +708,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -800,7 +811,7 @@ } }, { - "description": "add unknown commit label to retryable commit errors", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -810,7 +821,10 @@ "failCommands": [ "commitTransaction" ], - "errorCode": 11602 + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operations": [ @@ -836,6 +850,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -938,7 +953,7 @@ } }, { - "description": "add unknown commit label to writeConcernError ShutdownInProgress", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -948,6 +963,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" @@ -984,6 +1002,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -1088,7 +1107,104 @@ } }, { - "description": "add unknown commit label to writeConcernError WriteConcernFailed", + "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0", + "arguments": { + "options": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "errorLabelsContain": [], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + 
"command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1137,6 +1253,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1219,7 +1336,7 @@ } }, { - "description": "add unknown commit label to writeConcernError WriteConcernFailed with wtimeout", + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed with wtimeout", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1272,6 +1389,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1354,7 +1472,7 @@ } }, { - "description": "omit unknown commit label to writeConcernError UnsatisfiableWriteConcern", + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1400,6 +1518,7 @@ "object": "session0", "result": { "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError", "UnknownTransactionCommitResult" ] @@ -1460,7 +1579,7 @@ } }, { - "description": "omit unknown commit label to writeConcernError UnknownReplWriteConcern", + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1506,6 +1625,7 @@ "object": "session0", "result": { "errorLabelsOmit": [ + "RetryableWriteConcern", "TransientTransactionError", "UnknownTransactionCommitResult" ] @@ -1566,7 +1686,7 @@ } }, { - "description": "do not add unknown commit label to MaxTimeMSExpired inside transactions", + "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1613,6 +1733,7 @@ }, "result": { "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult", "TransientTransactionError" ] @@ -1695,7 +1816,7 @@ } }, { - "description": "add unknown commit label to MaxTimeMSExpired", + "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1742,6 +1863,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1826,7 +1948,7 @@ } }, { - "description": "add unknown commit label to writeConcernError MaxTimeMSExpired", + "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1876,6 +1998,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } 
diff --git a/test/transactions/errors.json b/test/transactions/legacy/errors.json similarity index 100% rename from test/transactions/errors.json rename to test/transactions/legacy/errors.json diff --git a/test/transactions/findOneAndDelete.json b/test/transactions/legacy/findOneAndDelete.json similarity index 100% rename from test/transactions/findOneAndDelete.json rename to test/transactions/legacy/findOneAndDelete.json diff --git a/test/transactions/findOneAndReplace.json b/test/transactions/legacy/findOneAndReplace.json similarity index 100% rename from test/transactions/findOneAndReplace.json rename to test/transactions/legacy/findOneAndReplace.json diff --git a/test/transactions/findOneAndUpdate.json b/test/transactions/legacy/findOneAndUpdate.json similarity index 100% rename from test/transactions/findOneAndUpdate.json rename to test/transactions/legacy/findOneAndUpdate.json diff --git a/test/transactions/insert.json b/test/transactions/legacy/insert.json similarity index 100% rename from test/transactions/insert.json rename to test/transactions/legacy/insert.json diff --git a/test/transactions/isolation.json b/test/transactions/legacy/isolation.json similarity index 100% rename from test/transactions/isolation.json rename to test/transactions/legacy/isolation.json diff --git a/test/transactions/mongos-pin-auto.json b/test/transactions/legacy/mongos-pin-auto.json similarity index 99% rename from test/transactions/mongos-pin-auto.json rename to test/transactions/legacy/mongos-pin-auto.json index f6ede52687..037f212f49 100644 --- a/test/transactions/mongos-pin-auto.json +++ b/test/transactions/legacy/mongos-pin-auto.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", diff --git a/test/transactions/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json similarity index 98% rename from test/transactions/mongos-recovery-token.json rename to test/transactions/legacy/mongos-recovery-token.json index 50c7349c1e..da4e9861d1 100644 --- a/test/transactions/mongos-recovery-token.json +++ b/test/transactions/legacy/mongos-recovery-token.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -179,6 +180,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" @@ -303,7 +307,8 @@ "data": { "failCommands": [ "commitTransaction", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/test/transactions/pin-mongos.json b/test/transactions/legacy/pin-mongos.json similarity index 99% rename from test/transactions/pin-mongos.json rename to test/transactions/legacy/pin-mongos.json index 5eb4fc57d9..485a3d9322 100644 --- a/test/transactions/pin-mongos.json +++ b/test/transactions/legacy/pin-mongos.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -875,7 +876,7 @@ "failCommands": [ "commitTransaction" ], - "errorCode": 50 + "errorCode": 51 } } } @@ -887,7 +888,7 @@ "errorLabelsOmit": [ "TransientTransactionError" ], - "errorCode": 50 + "errorCode": 51 } }, { @@ -1106,7 +1107,8 @@ "data": { "failCommands": [ "insert", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/test/transactions/read-concern.json 
b/test/transactions/legacy/read-concern.json similarity index 100% rename from test/transactions/read-concern.json rename to test/transactions/legacy/read-concern.json diff --git a/test/transactions/read-pref.json b/test/transactions/legacy/read-pref.json similarity index 100% rename from test/transactions/read-pref.json rename to test/transactions/legacy/read-pref.json diff --git a/test/transactions/reads.json b/test/transactions/legacy/reads.json similarity index 100% rename from test/transactions/reads.json rename to test/transactions/legacy/reads.json diff --git a/test/transactions/legacy/retryable-abort-errorLabels.json b/test/transactions/legacy/retryable-abort-errorLabels.json new file mode 100644 index 0000000000..1110ce2c32 --- /dev/null +++ b/test/transactions/legacy/retryable-abort-errorLabels.json @@ -0,0 +1,204 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "abortTransaction only retries once with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "abortTransaction does not retry without RetryableWriteError label", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/transactions/retryable-abort.json b/test/transactions/legacy/retryable-abort.json similarity index 97% rename from test/transactions/retryable-abort.json rename to test/transactions/legacy/retryable-abort.json index f6b7b0e49a..13cc7c88fb 100644 --- a/test/transactions/retryable-abort.json +++ b/test/transactions/legacy/retryable-abort.json @@ -402,7 +402,7 @@ } }, { - "description": "abortTransaction succeeds after NotMaster", + "description": "abortTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -413,6 +413,9 @@ "abortTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -503,7 +506,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterOrSecondary", + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -514,6 +517,9 @@ "abortTransaction" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -604,7 +610,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterNoSlaveOk", + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -615,6 +621,9 @@ "abortTransaction" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -716,6 +725,9 @@ "abortTransaction" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -817,6 +829,9 @@ "abortTransaction" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -918,6 +933,9 @@ "abortTransaction" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1019,6 +1037,9 @@ "abortTransaction" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1120,6 +1141,9 @@ "abortTransaction" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1221,6 +1245,9 @@ "abortTransaction" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1322,6 +1349,9 @@ "abortTransaction" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1423,6 +1453,9 @@ "abortTransaction" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1523,6 +1556,9 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, "errmsg": "Replication is being shut down" @@ -1637,6 +1673,9 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, "errmsg": "Replication is being shut down" @@ -1751,6 +1790,9 @@ "failCommands": [ "abortTransaction" ], + 
"errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, "errmsg": "Replication is being shut down" @@ -1865,6 +1907,9 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" diff --git a/test/transactions/legacy/retryable-commit-errorLabels.json b/test/transactions/legacy/retryable-commit-errorLabels.json new file mode 100644 index 0000000000..e0818f237b --- /dev/null +++ b/test/transactions/legacy/retryable-commit-errorLabels.json @@ -0,0 +1,223 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "commitTransaction does not retry error without RetryableWriteError label", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0", + "result": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "commitTransaction retries once with RetryableWriteError from server", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + 
"autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/transactions/retryable-commit.json b/test/transactions/legacy/retryable-commit.json similarity index 96% rename from test/transactions/retryable-commit.json rename to test/transactions/legacy/retryable-commit.json index b17438b700..49148c62d2 100644 --- a/test/transactions/retryable-commit.json +++ b/test/transactions/legacy/retryable-commit.json @@ -57,6 +57,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -207,6 +208,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -353,7 +355,9 @@ "result": { "errorCodeName": "Interrupted", "errorLabelsOmit": [ - "TransientTransactionError" + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" ] } } @@ -406,7 +410,7 @@ } }, { - "description": "commitTransaction fails after WriteConcernError Interrupted", + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -417,8 +421,8 @@ "commitTransaction" ], "writeConcernError": { - "code": 11601, - "errmsg": "operation was interrupted" + "code": 100, + "errmsg": "Not enough data-bearing nodes" } } }, @@ -452,7 +456,9 @@ "object": "session0", "result": { "errorLabelsOmit": [ - "TransientTransactionError" + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" ] } } @@ -618,7 +624,7 @@ } }, { - "description": "commitTransaction succeeds after NotMaster", + "description": "commitTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -629,6 +635,9 @@ "commitTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -726,7 +735,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterOrSecondary", + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -737,6 +746,9 @@ "commitTransaction" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -834,7 +846,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterNoSlaveOk", + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -845,6 +857,9 @@ "commitTransaction" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -953,6 +968,9 @@ "commitTransaction" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1061,6 +1079,9 @@ "commitTransaction" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1169,6 +1190,9 @@ "commitTransaction" ], 
"errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1277,6 +1301,9 @@ "commitTransaction" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1385,6 +1412,9 @@ "commitTransaction" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1493,6 +1523,9 @@ "commitTransaction" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1601,6 +1634,9 @@ "commitTransaction" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1709,6 +1745,9 @@ "commitTransaction" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1816,6 +1855,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, "errmsg": "Replication is being shut down" @@ -1935,6 +1977,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, "errmsg": "Replication is being shut down" @@ -2054,6 +2099,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, "errmsg": "Replication is being shut down" @@ -2173,6 +2221,9 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" diff --git a/test/transactions/retryable-writes.json b/test/transactions/legacy/retryable-writes.json similarity index 100% rename from test/transactions/retryable-writes.json rename to test/transactions/legacy/retryable-writes.json diff --git a/test/transactions/run-command.json b/test/transactions/legacy/run-command.json similarity index 100% rename from test/transactions/run-command.json rename to test/transactions/legacy/run-command.json diff --git a/test/transactions/transaction-options-repl.json b/test/transactions/legacy/transaction-options-repl.json similarity index 100% rename from test/transactions/transaction-options-repl.json rename to test/transactions/legacy/transaction-options-repl.json diff --git a/test/transactions/transaction-options.json b/test/transactions/legacy/transaction-options.json similarity index 100% rename from test/transactions/transaction-options.json rename to test/transactions/legacy/transaction-options.json diff --git a/test/transactions/update.json b/test/transactions/legacy/update.json similarity index 96% rename from test/transactions/update.json rename to test/transactions/legacy/update.json index 13cf2c9268..e33bf5b810 100644 --- a/test/transactions/update.json +++ b/test/transactions/legacy/update.json @@ -116,7 +116,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], @@ -145,9 +144,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -179,8 +176,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, @@ -346,7 +342,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], @@ -375,9 +370,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -409,8 +402,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, diff --git a/test/transactions/write-concern.json b/test/transactions/legacy/write-concern.json similarity index 99% rename from test/transactions/write-concern.json rename 
to test/transactions/legacy/write-concern.json index 88d062635f..84b1ea3650 100644 --- a/test/transactions/write-concern.json +++ b/test/transactions/legacy/write-concern.json @@ -877,7 +877,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], diff --git a/test/transactions/unified/do-not-retry-read-in-transaction.json b/test/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/test/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json new file mode 100644 index 0000000000..4f7ae43794 --- /dev/null +++ b/test/transactions/unified/mongos-unpin.json @@ -0,0 +1,437 @@ +{ + "description": "mongos-unpin", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "mongos-unpin-db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "mongos-unpin-db", + "documents": [] + } + ], + "_yamlAnchors": { + "anchors": 24 + }, + "tests": [ + { + "description": "unpin after TransientTransactionError error on commit", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + 
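The do-not-retry-read-in-transaction test above asserts that exactly one find command is sent even with retryReads=true: inside a transaction, individual reads are never retried, and the failure instead surfaces with the TransientTransactionError label so the whole transaction can be rerun. In PyMongo that rerun is what ClientSession.with_transaction provides; the URI and namespace below are placeholders:

    # with_transaction reruns the callback when an operation inside it fails
    # with the TransientTransactionError label.
    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")
    coll = client["retryable-read-in-transaction-test"]["coll"]

    def txn_callback(session):
        # If this find fails mid-transaction it is not retried by itself.
        return list(coll.find({}, session=session))

    with client.start_session() as session:
        docs = session.with_transaction(txn_callback)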
"session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin after non-transient error on abort", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin after TransientTransactionError error on abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin when a new transaction is started", + "operations": [ + { + "name": 
"startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction write operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction read operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort-handshake.json b/test/transactions/unified/retryable-abort-handshake.json new file mode 100644 index 0000000000..4ad56e2f2f --- /dev/null +++ b/test/transactions/unified/retryable-abort-handshake.json @@ -0,0 +1,204 @@ +{ + "description": "retryable abortTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "AbortTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": 
"insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-handshake.json b/test/transactions/unified/retryable-commit-handshake.json new file mode 100644 index 0000000000..d9315a8fc6 --- /dev/null +++ b/test/transactions/unified/retryable-commit-handshake.json @@ -0,0 +1,211 @@ +{ + "description": "retryable commitTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "CommitTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index d5cbdb7bbc..fd7fb2154d 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -1,11 +1,14 @@ +from __future__ import annotations + import sys sys.path[0:0] = [""] +from test import unittest + from bson import encode from bson.errors import InvalidStringData -from bson.py3compat import PY3 -from test import unittest + class TestUTF8(unittest.TestCase): @@ -13,64 +16,19 @@ class TestUTF8(unittest.TestCase): # legal utf-8 if the first byte is 0xf4 (244) def _assert_same_utf8_validation(self, data): try: - data.decode('utf-8') - py_is_legal = True + data.decode("utf-8") + py_is_legal = True except UnicodeDecodeError: py_is_legal = False try: - encode({'x': data}) - bson_is_legal = True + encode({"x": data}) + bson_is_legal = True except InvalidStringData: bson_is_legal = False self.assertEqual(py_is_legal, bson_is_legal, data) - @unittest.skipIf(PY3, "python3 has strong separation between bytes/unicode") - def test_legal_utf8_full_coverage(self): - # This test takes 400 seconds. Which is too long to run each time. - # However it is the only one which covers all possible bit combinations - # in the 244 space. - b1 = chr(0xf4) - - for b2 in map(chr, range(255)): - m2 = b1 + b2 - self._assert_same_utf8_validation(m2) - - for b3 in map(chr, range(255)): - m3 = m2 + b3 - self._assert_same_utf8_validation(m3) - - for b4 in map(chr, range(255)): - m4 = m3 + b4 - self._assert_same_utf8_validation(m4) - - # In python3: - # - 'bytes' are not checked with isLegalutf - # - 'unicode' We cannot create unicode objects with invalid utf8, since it - # would result in non valid code-points. 
- @unittest.skipIf(PY3, "python3 has strong separation between bytes/unicode") - def test_legal_utf8_few_samples(self): - good_samples = [ - '\xf4\x80\x80\x80', - '\xf4\x8a\x80\x80', - '\xf4\x8e\x80\x80', - '\xf4\x81\x80\x80', - ] - - for data in good_samples: - self._assert_same_utf8_validation(data) - - bad_samples = [ - '\xf4\x00\x80\x80', - '\xf4\x3a\x80\x80', - '\xf4\x7f\x80\x80', - '\xf4\x90\x80\x80', - '\xf4\xff\x80\x80', - ] - - for data in bad_samples: - self._assert_same_utf8_validation(data) if __name__ == "__main__": unittest.main() diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json new file mode 100644 index 0000000000..26d14051a7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "invalid": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json new file mode 100644 index 0000000000..c43a2a9125 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json @@ -0,0 +1,23 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json new file mode 100644 index 0000000000..1be9167a40 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": 0, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json new file mode 100644 index 0000000000..3f54d89aa7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json @@ -0,0 +1,28 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + 
"description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json new file mode 100644 index 0000000000..53f2f5f086 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": 0, + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json new file mode 100644 index 0000000000..cfd979e2b2 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "invalid": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json new file mode 100644 index 0000000000..59b273487d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json new file mode 100644 index 0000000000..ffcc85bfcf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json new file mode 100644 index 
0000000000..1664b79097 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json new file mode 100644 index 0000000000..5bd50c8078 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json new file mode 100644 index 0000000000..120c088b00 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json new file mode 100644 index 0000000000..1dd1c8a2a3 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json new file mode 100644 index 0000000000..22ded20440 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": 
"clientEncryptionOpts-kmsProviders-kmip-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json new file mode 100644 index 0000000000..9b9e74be37 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json new file mode 100644 index 0000000000..b93cfe00d1 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json new file mode 100644 index 0000000000..526ea24831 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json new file mode 100644 index 0000000000..b823a67baf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json @@ -0,0 +1,26 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + 
"keyVaultNamespace": "keyvault.datakeys" + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json new file mode 100644 index 0000000000..e7a6190b68 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json @@ -0,0 +1,27 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": 0 + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json new file mode 100644 index 0000000000..3b4972f23d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-tlsOptions_not_supported", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "tlsOptions": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-additionalProperties.json b/test/unified-test-format/invalid/collectionData-additionalProperties.json new file mode 100644 index 0000000000..1f4ed4c154 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-additionalProperties.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [], + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-required.json b/test/unified-test-format/invalid/collectionData-collectionName-required.json new file mode 100644 index 0000000000..5426418c88 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-type.json 
b/test/unified-test-format/invalid/collectionData-collectionName-type.json new file mode 100644 index 0000000000..2a922de13e --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": 0, + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-createOptions-type.json b/test/unified-test-format/invalid/collectionData-createOptions-type.json new file mode 100644 index 0000000000..5b78bbcbb6 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-createOptions-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-createOptions-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "createOptions": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-required.json b/test/unified-test-format/invalid/collectionData-databaseName-required.json new file mode 100644 index 0000000000..8417801390 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-type.json b/test/unified-test-format/invalid/collectionData-databaseName-type.json new file mode 100644 index 0000000000..d3480e8034 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-items.json b/test/unified-test-format/invalid/collectionData-documents-items.json new file mode 100644 index 0000000000..beb5af61c4 --- /dev/null +++ 
b/test/unified-test-format/invalid/collectionData-documents-items.json @@ -0,0 +1,40 @@ +{ + "description": "collectionData-documents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-required.json b/test/unified-test-format/invalid/collectionData-documents-required.json new file mode 100644 index 0000000000..4aadf9b159 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-documents-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-type.json b/test/unified-test-format/invalid/collectionData-documents-type.json new file mode 100644 index 0000000000..9cbd3c164c --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-documents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json new file mode 100644 index 0000000000..beef260eed --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "foo": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json new file mode 100644 index 0000000000..1b9f4bcbea --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": 
{ + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json new file mode 100644 index 0000000000..988b594d13 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readPreference-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readPreference": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json new file mode 100644 index 0000000000..088e9d1eb2 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-timeoutMS-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "timeoutMS": 4.5 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json new file mode 100644 index 0000000000..bd2157c5cb --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-writeConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "writeConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-items.json b/test/unified-test-format/invalid/createEntities-items.json new file mode 100644 index 0000000000..8e9d6ff702 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-items.json @@ -0,0 +1,13 @@ +{ + "description": "createEntities-items", + "schemaVersion": "1.0", + "createEntities": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-minItems.json b/test/unified-test-format/invalid/createEntities-minItems.json new file mode 100644 index 0000000000..3654923d28 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-minItems", + "schemaVersion": "1.0", + "createEntities": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-type.json b/test/unified-test-format/invalid/createEntities-type.json new file mode 100644 index 0000000000..ce3c382c93 --- /dev/null +++ 
b/test/unified-test-format/invalid/createEntities-type.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-type", + "schemaVersion": "1.0", + "createEntities": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/description-required.json b/test/unified-test-format/invalid/description-required.json new file mode 100644 index 0000000000..e4e0d0efdf --- /dev/null +++ b/test/unified-test-format/invalid/description-required.json @@ -0,0 +1,9 @@ +{ + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-additionalProperties.json b/test/unified-test-format/invalid/entity-additionalProperties.json new file mode 100644 index 0000000000..38b8898787 --- /dev/null +++ b/test/unified-test-format/invalid/entity-additionalProperties.json @@ -0,0 +1,15 @@ +{ + "description": "entity-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-additionalProperties.json b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json new file mode 100644 index 0000000000..46f9b4038e --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json new file mode 100644 index 0000000000..c3d7423e65 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-bucketOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "bucketOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-required.json b/test/unified-test-format/invalid/entity-bucket-database-required.json new file mode 100644 index 0000000000..1fde5a96c9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-type.json b/test/unified-test-format/invalid/entity-bucket-database-type.json new file mode 100644 index 0000000000..798d273fb0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-type.json @@ -0,0 +1,30 @@ +{ + 
"description": "entity-bucket-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-required.json b/test/unified-test-format/invalid/entity-bucket-id-required.json new file mode 100644 index 0000000000..c547d8ea3c --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-type.json b/test/unified-test-format/invalid/entity-bucket-id-type.json new file mode 100644 index 0000000000..f4e10ee630 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-type.json @@ -0,0 +1,30 @@ +{ + "description": "entity-bucket-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": 0, + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-additionalProperties.json b/test/unified-test-format/invalid/entity-client-additionalProperties.json new file mode 100644 index 0000000000..467e1d6ae1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-id-required.json b/test/unified-test-format/invalid/entity-client-id-required.json new file mode 100644 index 0000000000..4be2fbf8e8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-client-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-id-type.json b/test/unified-test-format/invalid/entity-client-id-type.json new file mode 100644 index 0000000000..cdc7cbc0e7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-client-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json new file mode 100644 index 0000000000..1252ac82d7 --- /dev/null +++ 
b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json new file mode 100644 index 0000000000..e78068a442 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json new file mode 100644 index 0000000000..5ac2b340c5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-enum.json b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json new file mode 100644 index 0000000000..c39c94eee2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-enum", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "foo" + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-items.json b/test/unified-test-format/invalid/entity-client-observeEvents-items.json new file mode 100644 index 0000000000..3aee11e3d5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json new file mode 100644 index 0000000000..e70d90c0a7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-type.json 
b/test/unified-test-format/invalid/entity-client-observeEvents-type.json new file mode 100644 index 0000000000..c144e32369 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json new file mode 100644 index 0000000000..c5572f1fbe --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeSensitiveCommands-type", + "schemaVersion": "1.5", + "createEntities": [ + { + "client": { + "id": "client0", + "observeSensitiveCommands": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json new file mode 100644 index 0000000000..b688dae631 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-deprecationErrors-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "deprecationErrors": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json new file mode 100644 index 0000000000..0b2fdc4849 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-strict-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "strict": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-type.json b/test/unified-test-format/invalid/entity-client-serverApi-type.json new file mode 100644 index 0000000000..20c9d1dce3 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-version-required.json b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json new file mode 100644 index 0000000000..8bef92b06f --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-version-required", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git
a/test/unified-test-format/invalid/entity-client-serverApi-version-type.json b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json new file mode 100644 index 0000000000..2c36ff57ed --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json new file mode 100644 index 0000000000..d94863ed11 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-storeEventsAsEntities-minItems", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json new file mode 100644 index 0000000000..79f6b85ed2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-storeEventsAsEntities-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-uriOptions-type.json b/test/unified-test-format/invalid/entity-client-uriOptions-type.json new file mode 100644 index 0000000000..4252480e98 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-uriOptions-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-uriOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json new file mode 100644 index 0000000000..e429cd71f8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-useMultipleMongoses-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json new file mode 100644 index 0000000000..77c0a91434 --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "entity-clientEncryption-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": 
"client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + }, + "invalid": {} + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json new file mode 100644 index 0000000000..88e852342a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0" + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json new file mode 100644 index 0000000000..77fb6a362a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": 0 + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-required.json b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json new file mode 100644 index 0000000000..464ba7159a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json @@ -0,0 +1,28 @@ +{ + "description": "entity-clientEncryption-id-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-type.json b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json new file mode 100644 index 0000000000..a7746657fc --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json @@ -0,0 +1,29 @@ +{ + "description": "entity-clientEncryption-id-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": 0, + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-additionalProperties.json b/test/unified-test-format/invalid/entity-collection-additionalProperties.json new file mode 100644 index 0000000000..90ee2b1ca0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-additionalProperties.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-required.json b/test/unified-test-format/invalid/entity-collection-collectionName-required.json new file mode 100644 index 0000000000..2446722e5e --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-type.json b/test/unified-test-format/invalid/entity-collection-collectionName-type.json new file mode 100644 index 0000000000..ccad66aac9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json new file mode 100644 index 0000000000..52220c1cd1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-collectionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "collectionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-required.json b/test/unified-test-format/invalid/entity-collection-database-required.json new file mode 100644 index 0000000000..ba96b43f76 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-type.json b/test/unified-test-format/invalid/entity-collection-database-type.json new file mode 100644 index 0000000000..b87134498d --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-type.json @@ -0,0 +1,31 @@ +{ + "description": 
"entity-collection-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": 0, + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-id-required.json b/test/unified-test-format/invalid/entity-collection-id-required.json new file mode 100644 index 0000000000..84e5352ead --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-id-type.json b/test/unified-test-format/invalid/entity-collection-id-type.json new file mode 100644 index 0000000000..f0821e5250 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": 0, + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-additionalProperties.json b/test/unified-test-format/invalid/entity-database-additionalProperties.json new file mode 100644 index 0000000000..964cd27966 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-required.json b/test/unified-test-format/invalid/entity-database-client-required.json new file mode 100644 index 0000000000..54f99cf13e --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-type.json b/test/unified-test-format/invalid/entity-database-client-type.json new file mode 100644 index 0000000000..ff4584c405 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": 
"database0", + "client": 0, + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-required.json b/test/unified-test-format/invalid/entity-database-databaseName-required.json new file mode 100644 index 0000000000..64cca95c49 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-type.json b/test/unified-test-format/invalid/entity-database-databaseName-type.json new file mode 100644 index 0000000000..bd01aef781 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseOptions-type.json b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json new file mode 100644 index 0000000000..bc22ad3129 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-databaseOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-required.json b/test/unified-test-format/invalid/entity-database-id-required.json new file mode 100644 index 0000000000..0b65cf1159 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-type.json b/test/unified-test-format/invalid/entity-database-id-type.json new file mode 100644 index 0000000000..98b5789d04 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": 0, + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-maxProperties.json b/test/unified-test-format/invalid/entity-maxProperties.json new file mode 100644 index 0000000000..f4a6b7c914 --- /dev/null +++ b/test/unified-test-format/invalid/entity-maxProperties.json @@ -0,0 +1,22 @@ +{ + 
"description": "entity-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + }, + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-minProperties.json b/test/unified-test-format/invalid/entity-minProperties.json new file mode 100644 index 0000000000..d89949ce30 --- /dev/null +++ b/test/unified-test-format/invalid/entity-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "entity-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-additionalProperties.json b/test/unified-test-format/invalid/entity-session-additionalProperties.json new file mode 100644 index 0000000000..ab4cd2014f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "entity-session-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-required.json b/test/unified-test-format/invalid/entity-session-client-required.json new file mode 100644 index 0000000000..8c9ed72e99 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-type.json b/test/unified-test-format/invalid/entity-session-client-type.json new file mode 100644 index 0000000000..b5ccc3f60f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-required.json b/test/unified-test-format/invalid/entity-session-id-required.json new file mode 100644 index 0000000000..3e5d5c5439 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-type.json b/test/unified-test-format/invalid/entity-session-id-type.json new file mode 100644 index 0000000000..dcd46e5be7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": 0, 
+ "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-sessionOptions-type.json b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json new file mode 100644 index 0000000000..0ee15891eb --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-session-sessionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-additionalProperties.json b/test/unified-test-format/invalid/entity-stream-additionalProperties.json new file mode 100644 index 0000000000..c8e76e9985 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-additionalProperties.json @@ -0,0 +1,19 @@ +{ + "description": "entity-stream-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FF", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json new file mode 100644 index 0000000000..7381893b55 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-pattern", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FFF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-required.json b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json new file mode 100644 index 0000000000..cc3bf09b20 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-hexBytes-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-type.json b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json new file mode 100644 index 0000000000..e6e2299eac --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-required.json b/test/unified-test-format/invalid/entity-stream-id-required.json new file mode 100644 index 0000000000..ff814d4e9c --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-type.json 
b/test/unified-test-format/invalid/entity-stream-id-type.json new file mode 100644 index 0000000000..5fc654d97e --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": 0, + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-additionalProperties.json b/test/unified-test-format/invalid/entity-thread-additionalProperties.json new file mode 100644 index 0000000000..b296719f13 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-thread-additionalProperties", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": "thread0", + "foo": "bar" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-required.json b/test/unified-test-format/invalid/entity-thread-id-required.json new file mode 100644 index 0000000000..3b197e3d6b --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-thread-id-required", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-type.json b/test/unified-test-format/invalid/entity-thread-id-type.json new file mode 100644 index 0000000000..8f281ef6f4 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-thread-id-type", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json new file mode 100644 index 0000000000..110ce7869e --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutFailedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutFailedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..f84e208d6a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json new file mode 100644 index 0000000000..56ffcdee72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedInEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedInEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json new file mode 100644 index 0000000000..9b804aad0a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedOutEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json new file mode 100644 index 0000000000..053cd0b413 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionClosedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionClosedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..c2edc3f6aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..994fb63314 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": 
"cmap", + "events": [ + { + "connectionReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5a1a25d463 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json new file mode 100644 index 0000000000..c181707f4a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClosedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClosedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..6aaa59a600 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..66c803a5d8 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json new file mode 100644 index 0000000000..9e45cbadda --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "expectedCommandEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "foo": 0 
+ } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json new file mode 100644 index 0000000000..a571d8e0c0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..7787ea6516 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5314dc9f80 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..996332d27d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "foo": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json new file mode 100644 index 0000000000..8f89460617 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json @@ -0,0 +1,29 @@ +{ + 
"description": "expectedCommandEvent-commandStartedEvent-command-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json new file mode 100644 index 0000000000..121947b06f --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json new file mode 100644 index 0000000000..97d2b84f68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..a913f00ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..39ab925efb --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json new file mode 100644 index 0000000000..bde2f4817b --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..0712c33694 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json new file mode 100644 index 0000000000..edc9d3cd72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json new file mode 100644 index 0000000000..9df04acd29 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-reply-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "reply": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json new file mode 100644 index 0000000000..dd8b0e7e7c --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json @@ -0,0 +1,28 @@ +{ + "description": "expectedCommandEvent-maxProperties", + "schemaVersion": "1.0", + 
"createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": {}, + "commandSucceededEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json new file mode 100644 index 0000000000..0f3e711a18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedCommandEvent-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + {} + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-additionalProperties.json b/test/unified-test-format/invalid/expectedError-additionalProperties.json new file mode 100644 index 0000000000..3a79df8e34 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "foo": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCode-type.json b/test/unified-test-format/invalid/expectedError-errorCode-type.json new file mode 100644 index 0000000000..b6b6f5d05a --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCode-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCode-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCode": "foo" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCodeName-type.json b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json new file mode 100644 index 0000000000..3ac5e43045 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCodeName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCodeName": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorContains-type.json b/test/unified-test-format/invalid/expectedError-errorContains-type.json new file mode 100644 index 0000000000..847a987dff --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorContains-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorContains-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorContains": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json 
b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json new file mode 100644 index 0000000000..4eab56ad18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json @@ -0,0 +1,27 @@ +{ + "description": "expectedError-errorLabelsContain-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json new file mode 100644 index 0000000000..48162110aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json new file mode 100644 index 0000000000..a0aba918b5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json new file mode 100644 index 0000000000..6c94d07135 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json @@ -0,0 +1,27 @@ +{ + "description": "expectedError-errorLabelsOmit-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json new file mode 100644 index 0000000000..88c6582028 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsOmit-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json new file mode 100644 index 0000000000..5f57114fea --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json @@ -0,0 +1,25 @@ 
+{ + "description": "expectedError-errorLabelsOmit-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isClientError-type.json b/test/unified-test-format/invalid/expectedError-isClientError-type.json new file mode 100644 index 0000000000..bfcc06679b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isClientError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isClientError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isClientError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isError-const.json b/test/unified-test-format/invalid/expectedError-isError-const.json new file mode 100644 index 0000000000..6a398bbf22 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-const.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-const", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": false + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isError-type.json b/test/unified-test-format/invalid/expectedError-isError-type.json new file mode 100644 index 0000000000..354aff31f4 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json new file mode 100644 index 0000000000..5683911d0d --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isTimeoutError-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isTimeoutError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-minProperties.json b/test/unified-test-format/invalid/expectedError-minProperties.json new file mode 100644 index 0000000000..10e0b89ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-minProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedError-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json 
b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json new file mode 100644 index 0000000000..90ed9c3273 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-required.json b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json new file mode 100644 index 0000000000..24b6330de7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedEventsForClient-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-type.json b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json new file mode 100644 index 0000000000..6e66857ee6 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": 0, + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json new file mode 100644 index 0000000000..6e26cfaa7e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-enum", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType value", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "foo", + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json new file mode 100644 index 0000000000..105bb001e5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType type", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": 10, + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-items.json b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json new file mode 100644 index 0000000000..c1fcd4a6c3 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json @@ -0,0 +1,25 @@ +{ + "description": "expectedEventsForClient-events-items", + 
"schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-required.json b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json new file mode 100644 index 0000000000..39c1e9e12d --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedEventsForClient-events-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-type.json b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json new file mode 100644 index 0000000000..4199d042b0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-events-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json new file mode 100644 index 0000000000..b380219912 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_cmap_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is cmap", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "commandStartedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json new file mode 100644 index 0000000000..08446fe180 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_command_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is command", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json new file mode 100644 index 0000000000..c31efbb8b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json @@ -0,0 +1,27 @@ +{ + 
"description": "expectedEventsForClient-events_conflicts_with_default_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is unset", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json new file mode 100644 index 0000000000..965190664e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents-type", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "ignoreExtraEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..1c6ec460b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json new file mode 100644 index 0000000000..58f686739a --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "foo": "bar" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json new file mode 100644 index 0000000000..1b4a7e2e70 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": "not a server type" + } + } 
+ } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json new file mode 100644 index 0000000000..c7ea9cc9be --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": 12 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-items.json b/test/unified-test-format/invalid/initialData-items.json new file mode 100644 index 0000000000..9c27d554f9 --- /dev/null +++ b/test/unified-test-format/invalid/initialData-items.json @@ -0,0 +1,13 @@ +{ + "description": "initialData-items", + "schemaVersion": "1.0", + "initialData": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-minItems.json b/test/unified-test-format/invalid/initialData-minItems.json new file mode 100644 index 0000000000..984100a2be --- /dev/null +++ b/test/unified-test-format/invalid/initialData-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-minItems", + "schemaVersion": "1.0", + "initialData": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-type.json b/test/unified-test-format/invalid/initialData-type.json new file mode 100644 index 0000000000..c33585e03a --- /dev/null +++ b/test/unified-test-format/invalid/initialData-type.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-type", + "schemaVersion": "1.0", + "initialData": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-additionalProperties.json b/test/unified-test-format/invalid/operation-additionalProperties.json new file mode 100644 index 0000000000..8f2f1434ec --- /dev/null +++ b/test/unified-test-format/invalid/operation-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "operation-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-arguments-type.json b/test/unified-test-format/invalid/operation-arguments-type.json new file mode 100644 index 0000000000..a22f3921c3 --- /dev/null +++ b/test/unified-test-format/invalid/operation-arguments-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-arguments-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "arguments": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json new file mode 100644 index 
0000000000..bc15fbac76 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_expectResult", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "expectResult": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..dead4a3b9d --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "saveResultAsEntity": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-type.json b/test/unified-test-format/invalid/operation-expectError-type.json new file mode 100644 index 0000000000..b224ba3535 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectEvents-type.json b/test/unified-test-format/invalid/operation-expectEvents-type.json new file mode 100644 index 0000000000..ecd4c011a9 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectEvents-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json new file mode 100644 index 0000000000..b47e6be2a1 --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json @@ -0,0 +1,19 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_expectError", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectError", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json new file mode 100644 index 0000000000..03c5a1dbbc --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json @@ -0,0 +1,17 @@ +{ 
+ "description": "operation-ignoreResultAndError-conflicts_with_expectResult", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectResult", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..6745dff2eb --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,17 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with saveResultAsEntity", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "saveResultAsEntity": "entity0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-required.json b/test/unified-test-format/invalid/operation-name-required.json new file mode 100644 index 0000000000..42fcb3a308 --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-name-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-type.json b/test/unified-test-format/invalid/operation-name-type.json new file mode 100644 index 0000000000..2f91da078a --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-name-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": 0, + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-required.json b/test/unified-test-format/invalid/operation-object-required.json new file mode 100644 index 0000000000..c0410ce3fd --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-object-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-type.json b/test/unified-test-format/invalid/operation-object-type.json new file mode 100644 index 0000000000..edb0a0b51a --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-object-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json new file mode 100644 index 0000000000..65ead94c7a --- /dev/null +++ b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json @@ -0,0 +1,23 @@ +{ + "description": 
"operation-saveResultAsEntity-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "saveResultAsEntity": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json new file mode 100644 index 0000000000..79fa687e45 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json @@ -0,0 +1,16 @@ +{ + "description": "runOnRequirement-additionalProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-auth-type.json b/test/unified-test-format/invalid/runOnRequirement-auth-type.json new file mode 100644 index 0000000000..e5475d079d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-auth-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-auth-type", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "auth": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json new file mode 100644 index 0000000000..b48c850d14 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-csfle-type", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json new file mode 100644 index 0000000000..78766eb925 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json new file mode 100644 index 0000000000..ffc9118ba2 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minProperties.json b/test/unified-test-format/invalid/runOnRequirement-minProperties.json new file mode 100644 index 0000000000..c2bfed3be7 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirement-minProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git 
a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json new file mode 100644 index 0000000000..19abc1755f --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json new file mode 100644 index 0000000000..688d1c67ee --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json new file mode 100644 index 0000000000..031fa539df --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-enum", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-type.json b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json new file mode 100644 index 0000000000..1aa41712f9 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-type", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": 1234 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json new file mode 100644 index 0000000000..f62e5040d4 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-enum", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + "foo" + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-items.json b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json new file mode 100644 index 0000000000..a205b3293d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json new file mode 100644 index 0000000000..16f29b3f4b --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json 
@@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-type.json b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json new file mode 100644 index 0000000000..f6d147cd6f --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-items.json b/test/unified-test-format/invalid/runOnRequirements-items.json new file mode 100644 index 0000000000..40ec84a3f3 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirements-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-minItems.json b/test/unified-test-format/invalid/runOnRequirements-minItems.json new file mode 100644 index 0000000000..4ca9f99b5d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-type.json b/test/unified-test-format/invalid/runOnRequirements-type.json new file mode 100644 index 0000000000..98b859f3ea --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-type", + "schemaVersion": "1.0", + "runOnRequirements": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-pattern.json b/test/unified-test-format/invalid/schemaVersion-pattern.json new file mode 100644 index 0000000000..bcb8980516 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-pattern.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-pattern", + "schemaVersion": "1.2.3.4", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-required.json b/test/unified-test-format/invalid/schemaVersion-required.json new file mode 100644 index 0000000000..7388ff0bf1 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-required.json @@ -0,0 +1,9 @@ +{ + "description": "schemaVersion-required", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-type.json b/test/unified-test-format/invalid/schemaVersion-type.json new file mode 100644 index 0000000000..646473a209 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-type.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-type", + "schemaVersion": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json 
b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json new file mode 100644 index 0000000000..5357da8d8d --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json @@ -0,0 +1,26 @@ +{ + "description": "storeEventsAsEntity-additionalProperties", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "CommandStartedEvent" + ], + "foo": 0 + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json new file mode 100644 index 0000000000..ee99a55381 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json @@ -0,0 +1,25 @@ +{ + "description": "storeEventsAsEntity-events-enum", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "foo" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json new file mode 100644 index 0000000000..ddab042b1b --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json @@ -0,0 +1,23 @@ +{ + "description": "storeEventsAsEntity-events-minItems", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json new file mode 100644 index 0000000000..90b45918ce --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json @@ -0,0 +1,22 @@ +{ + "description": "storeEventsAsEntity-events-required", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events" + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json new file mode 100644 index 0000000000..1b920ebd5d --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json @@ -0,0 +1,23 @@ +{ + "description": "storeEventsAsEntity-events-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": 0 + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json new file mode 100644 index 0000000000..71387c5315 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json @@ -0,0 +1,24 @@ +{ + "description": "storeEventsAsEntity-id-required", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + 
"storeEventsAsEntities": [ + { + "events": [ + "CommandStartedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json new file mode 100644 index 0000000000..4f52dc2533 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json @@ -0,0 +1,25 @@ +{ + "description": "storeEventsAsEntity-id-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": 0, + "events": [ + "CommandStartedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-additionalProperties.json b/test/unified-test-format/invalid/test-additionalProperties.json new file mode 100644 index 0000000000..a699319c30 --- /dev/null +++ b/test/unified-test-format/invalid/test-additionalProperties.json @@ -0,0 +1,11 @@ +{ + "description": "test-additionalProperties", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "foo": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-required.json b/test/unified-test-format/invalid/test-description-required.json new file mode 100644 index 0000000000..8bf23014d4 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-description-required", + "schemaVersion": "1.0", + "tests": [ + { + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-type.json b/test/unified-test-format/invalid/test-description-type.json new file mode 100644 index 0000000000..bba3690449 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-description-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": 0, + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-items.json b/test/unified-test-format/invalid/test-expectEvents-items.json new file mode 100644 index 0000000000..394f74746c --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectEvents-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-minItems.json b/test/unified-test-format/invalid/test-expectEvents-minItems.json new file mode 100644 index 0000000000..0da3a56f79 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-type.json b/test/unified-test-format/invalid/test-expectEvents-type.json new file mode 100644 index 0000000000..1569f0a0d7 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-items.json 
b/test/unified-test-format/invalid/test-operations-items.json new file mode 100644 index 0000000000..00af8e7453 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-items.json @@ -0,0 +1,12 @@ +{ + "description": "test-operations-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-required.json b/test/unified-test-format/invalid/test-operations-required.json new file mode 100644 index 0000000000..67c6f83044 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-operations-required", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo" + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-type.json b/test/unified-test-format/invalid/test-operations-type.json new file mode 100644 index 0000000000..1e8b5b2496 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-operations-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-items.json b/test/unified-test-format/invalid/test-outcome-items.json new file mode 100644 index 0000000000..cf6bb54f87 --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-outcome-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-minItems.json b/test/unified-test-format/invalid/test-outcome-minItems.json new file mode 100644 index 0000000000..aadf8e514a --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-type.json b/test/unified-test-format/invalid/test-outcome-type.json new file mode 100644 index 0000000000..e60c119d7e --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-items.json b/test/unified-test-format/invalid/test-runOnRequirements-items.json new file mode 100644 index 0000000000..866bebb51f --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-runOnRequirements-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-minItems.json b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json new file mode 100644 index 0000000000..d61f063849 --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-runOnRequirements-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-type.json 
b/test/unified-test-format/invalid/test-runOnRequirements-type.json new file mode 100644 index 0000000000..5b25b1005d --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-runOnRequirements-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-skipReason-type.json b/test/unified-test-format/invalid/test-skipReason-type.json new file mode 100644 index 0000000000..0408e76834 --- /dev/null +++ b/test/unified-test-format/invalid/test-skipReason-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-skipReason-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "skipReason": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/tests-items.json b/test/unified-test-format/invalid/tests-items.json new file mode 100644 index 0000000000..11f37469e4 --- /dev/null +++ b/test/unified-test-format/invalid/tests-items.json @@ -0,0 +1,7 @@ +{ + "description": "tests-items", + "schemaVersion": "1.0", + "tests": [ + 0 + ] +} diff --git a/test/unified-test-format/invalid/tests-minItems.json b/test/unified-test-format/invalid/tests-minItems.json new file mode 100644 index 0000000000..3f74f94af7 --- /dev/null +++ b/test/unified-test-format/invalid/tests-minItems.json @@ -0,0 +1,5 @@ +{ + "description": "tests-minItems", + "schemaVersion": "1.0", + "tests": [] +} diff --git a/test/unified-test-format/invalid/tests-required.json b/test/unified-test-format/invalid/tests-required.json new file mode 100644 index 0000000000..de4b2fd063 --- /dev/null +++ b/test/unified-test-format/invalid/tests-required.json @@ -0,0 +1,4 @@ +{ + "description": "tests-required", + "schemaVersion": "1.0" +} diff --git a/test/unified-test-format/invalid/tests-type.json b/test/unified-test-format/invalid/tests-type.json new file mode 100644 index 0000000000..62d8194a41 --- /dev/null +++ b/test/unified-test-format/invalid/tests-type.json @@ -0,0 +1,5 @@ +{ + "description": "tests-type", + "schemaVersion": "1.0", + "tests": 0 +} diff --git a/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..9799bb2f65 --- /dev/null +++ b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,63 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "operation fails if client field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "connections": 1 + } + } + ] + }, + { + "description": "operation fails if connections field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "operation fails if client entity does not exist", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client1" + } + } + ] + }, + { + "description": "operation fails if number of connections is incorrect", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + 
"client": "client0", + "connections": 1 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json new file mode 100644 index 0000000000..7f7f1978c3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-bucket-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "bucket": { + "id": "bucket0", + "database": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json new file mode 100644 index 0000000000..d92d23dcaf --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-apiVersion-unsupported", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "server_will_never_support_this_api_version" + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json new file mode 100644 index 0000000000..8c0c4d2041 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json @@ -0,0 +1,28 @@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_with_client_id", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json new file mode 100644 index 0000000000..77bc4abf2e --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json @@ -0,0 +1,43 @@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_within_different_array", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + } + ] + } + }, + { + "client": { + "id": "client1", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json new file mode 100644 index 0000000000..e1a9499883 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json @@ -0,0 +1,36 
@@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_within_same_array", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + }, + { + "id": "events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-collection-database-undefined.json b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json new file mode 100644 index 0000000000..20b0733e34 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-collection-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "collection": { + "id": "collection0", + "database": "foo", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-database-client-undefined.json b/test/unified-test-format/valid-fail/entity-database-client-undefined.json new file mode 100644 index 0000000000..0f8110e6d3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-database-client-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-database-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "database": { + "id": "database0", + "client": "foo", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-findCursor-malformed.json b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json new file mode 100644 index 0000000000..0956efa4c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json @@ -0,0 +1,44 @@ +{ + "description": "entity-findCursor-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "createFindCursor fails if filter is not specified", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "saveResultAsEntity": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-findCursor.json b/test/unified-test-format/valid-fail/entity-findCursor.json new file mode 100644 index 0000000000..389e448c06 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-findCursor.json @@ -0,0 +1,52 @@ +{ + "description": "entity-findCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": 
"iterateUntilDocumentOrError fails if it references a nonexistent entity", + "operations": [ + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + } + ] + }, + { + "description": "close fails if it references a nonexistent entity", + "operations": [ + { + "name": "close", + "object": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-session-client-undefined.json b/test/unified-test-format/valid-fail/entity-session-client-undefined.json new file mode 100644 index 0000000000..260356436a --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-session-client-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-session-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "session": { + "id": "session0", + "client": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json new file mode 100644 index 0000000000..b64779c723 --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json @@ -0,0 +1,48 @@ +{ + "description": "ignoreResultAndError-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "malformed operation fails if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "foo": "bar" + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError.json b/test/unified-test-format/valid-fail/ignoreResultAndError.json new file mode 100644 index 0000000000..01b2421a9f --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError.json @@ -0,0 +1,59 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are not ignored if ignoreResultAndError is false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": false + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json new file mode 100644 index 0000000000..e62de80033 --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": 
"kmsProviders-missing_aws_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json new file mode 100644 index 0000000000..8ef805d0fa --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_azure_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": "tenantId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json new file mode 100644 index 0000000000..c6da1ce58c --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_gcp_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": "email" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-no_kms.json b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json new file mode 100644 index 0000000000..57499b4eaf --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json @@ -0,0 +1,32 @@ +{ + "description": "clientEncryptionOpts-no_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-failure.json b/test/unified-test-format/valid-fail/operation-failure.json new file mode 100644 index 0000000000..8f6cae1521 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-failure.json @@ -0,0 +1,56 @@ +{ + "description": "operation-failure", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "operation-failure" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-unsupported.json b/test/unified-test-format/valid-fail/operation-unsupported.json new file mode 100644 index 0000000000..d8ef5ab1c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-unsupported.json @@ -0,0 +1,22 @@ +{ + "description": "operation-unsupported", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "Unsupported operation", + "operations": [ + { + "name": "unsupportedOperation", + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json new file mode 100644 index 0000000000..ea425fb568 --- /dev/null +++ b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json @@ -0,0 +1,66 @@ +{ + "description": "returnDocument-enum-invalid", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "tests": [ + { + "description": "FindOneAndReplace returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "invalid" + } + } + ] + }, + { + "description": "FindOneAndUpdate returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "invalid" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/schemaVersion-unsupported.json b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json new file mode 100644 index 0000000000..ceb5532917 --- /dev/null +++ b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-unsupported", + "schemaVersion": "0.1", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..a9fc063f33 --- /dev/null +++ b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,27 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "basic assertion succeeds", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": 
"client0", + "connections": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json new file mode 100644 index 0000000000..19edc2247b --- /dev/null +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -0,0 +1,79 @@ +{ + "description": "collectionData-createOptions", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0", + "createOptions": { + "capped": true, + "size": 4096 + }, + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "collection is created with the correct options", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$collStats": { + "storageStats": {} + } + }, + { + "$project": { + "capped": "$storageStats.capped", + "maxSize": "$storageStats.maxSize" + } + } + ] + }, + "expectResult": [ + { + "capped": true, + "maxSize": 4096 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/createEntities-operation.json b/test/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 0000000000..3fde42919d --- /dev/null +++ b/test/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-client-cmap-events.json b/test/unified-test-format/valid-pass/entity-client-cmap-events.json new file mode 100644 index 0000000000..3209033def --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-client-cmap-events.json @@ -0,0 +1,71 @@ +{ + "description": "entity-client-cmap-events", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + 
"collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "events are captured during an operation", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json new file mode 100644 index 0000000000..e37e5a1acd --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json @@ -0,0 +1,67 @@ +{ + "description": "entity-client-storeEventsAsEntities", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "storeEventsAsEntities captures events", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 0000000000..88fc28e34e --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,108 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} 
diff --git a/test/unified-test-format/valid-pass/entity-find-cursor.json b/test/unified-test-format/valid-pass/entity-find-cursor.json new file mode 100644 index 0000000000..85b8f69d7f --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-find-cursor.json @@ -0,0 +1,182 @@ +{ + "description": "entity-find-cursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "cursors can be created, iterated, and closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "ns": { + "$$type": "string" + }, + "firstBatch": { + "$$type": "array" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll0", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursorsKilled": { + "$$unsetOrMatches": { + "$$type": "array" + } + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json new file mode 100644 index 0000000000..fe308df965 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json @@ -0,0 +1,126 @@ +{ + "description": "expectedEventsForClient-eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + 
"initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "eventType can be set to command and cmap", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + }, + { + "description": "eventType defaults to command if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json new file mode 100644 index 0000000000..178b756c2c --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json @@ -0,0 +1,151 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "ignoreExtraEvents can be set to false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": false, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents can be set to true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents defaults to false if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 4 + } + ] + }, + 
"commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/ignoreResultAndError.json b/test/unified-test-format/valid-pass/ignoreResultAndError.json new file mode 100644 index 0000000000..2e9b1c58ab --- /dev/null +++ b/test/unified-test-format/valid-pass/ignoreResultAndError.json @@ -0,0 +1,59 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are ignored if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json new file mode 100644 index 0000000000..7cc74939eb --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json @@ -0,0 +1,52 @@ +{ + "description": "kmsProviders-explicit_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": "secretAccessKey" + }, + "azure": { + "tenantId": "tenantId", + "clientId": "clientId", + "clientSecret": "clientSecret" + }, + "gcp": { + "email": "email", + "privateKey": "cHJpdmF0ZUtleQo=" + }, + "kmip": { + "endpoint": "endpoint" + }, + "local": { + "key": "a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json new file mode 100644 index 0000000000..363f2a4576 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json @@ -0,0 +1,54 @@ +{ + "description": "kmsProviders-mixed_kms_credential_fields", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": "tenantId", + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": "email", + "privateKey": { + "$$placeholder": 1 + } + } + } + 
} + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json new file mode 100644 index 0000000000..3f7721f01d --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json @@ -0,0 +1,70 @@ +{ + "description": "kmsProviders-placeholder_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json new file mode 100644 index 0000000000..12ca580941 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json @@ -0,0 +1,39 @@ +{ + "description": "kmsProviders-unconfigured_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {}, + "azure": {}, + "gcp": {}, + "kmip": {}, + "local": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "skipReason": "DRIVERS-2280: waiting on driver support for on-demand credentials", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/matches-lte-operator.json b/test/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 0000000000..4de65c5838 --- /dev/null +++ b/test/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + 
"commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json new file mode 100644 index 0000000000..d3ae5665be --- /dev/null +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -0,0 +1,706 @@ +{ + "description": "observeSensitiveCommands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": false + } + }, + { + "client": { + "id": "client2", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database2", + "client": "client2", + "databaseName": "observeSensitiveCommands" + } + } + ], + "tests": [ + { + "description": "getnonce is observed with observeSensitiveCommands=true", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce is not observed with observeSensitiveCommands=false", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [] + } + ] + }, + { + "description": "getnonce is not observed by default", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello with speculativeAuthenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello without speculativeAuthenticate is always observed", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculativeAuthenticate", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + 
"$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "legacy hello without speculativeAuthenticate is always observed", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-change-streams.json b/test/unified-test-format/valid-pass/poc-change-streams.json new file mode 100644 index 0000000000..50f0d06f08 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-change-streams.json @@ -0,0 +1,455 @@ +{ + "description": "poc-change-streams", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + 
"client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "change-stream-tests" + } + }, + { + "database": { + "id": "database2", + "client": "client1", + "databaseName": "change-stream-tests-2" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "test2" + } + }, + { + "collection": { + "id": "collection3", + "database": "database2", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test2", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test", + "databaseName": "change-stream-tests-2", + "documents": [] + } + ], + "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection2", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection3", + "arguments": { + "document": { + "y": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "z": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test2" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests-2", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "y": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "z": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 
{ + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true, + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "batchSize": 1, + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 2 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-command-monitoring.json b/test/unified-test-format/valid-pass/poc-command-monitoring.json new file mode 100644 index 0000000000..fe0a5ae991 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-command-monitoring.json @@ -0,0 +1,223 @@ +{ + "description": "poc-command-monitoring", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + 
"commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json new file mode 100644 index 0000000000..0790d9b789 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -0,0 +1,450 @@ +{ + "description": "poc-crud", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + 
"id": "collection2", + "database": "database0", + "collectionName": "coll2", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "collectionName": "coll2", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "BulkWrite with mixed ordered operations", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "3": 4 + } + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 1, + "upsertedIds": { + "5": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 34 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "name": "insertMany", + "object": "collection1", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "readConcern majority with out stage", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ], + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection2", + "arguments": { + "pipeline": 
[ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll2", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ], + "readConcern": { + "level": "majority" + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $listLocalSessions", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database1", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "dummy": "dummy field" + } + }, + { + "$project": { + "_id": 0, + "dummy": 1 + } + } + ] + }, + "expectResult": [ + { + "dummy": "dummy field" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-gridfs.json b/test/unified-test-format/valid-pass/poc-gridfs.json new file mode 100644 index 0000000000..1f07a19bf6 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-gridfs.json @@ -0,0 +1,301 @@ +{ + "description": "poc-gridfs", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Delete when length is 10", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + 
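The "readConcern majority with out stage" test above exercises a collection-level read concern on an aggregate with $out; roughly, the driver-side equivalent looks like this sketch (placeholder connection, seeded coll2 assumed):

```python
from pymongo import MongoClient
from pymongo.read_concern import ReadConcern

client = MongoClient()  # placeholder connection
# Matches the collectionOptions on the "collection2" entity above.
coll = client["crud-tests"].get_collection(
    "coll2", read_concern=ReadConcern("majority")
)

# $out writes the results server-side; the readConcern is sent with the
# aggregate command, which the commandStartedEvent expectation verifies.
coll.aggregate(
    [
        {"$sort": {"x": 1}},
        {"$match": {"_id": {"$gt": 1}}},
        {"$out": "aggregate_out"},
    ]
)
```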
"databaseName": "gridfs-tests", + "documents": [] + } + ] + }, + { + "description": "Download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } + } + ] + }, + { + "description": "Download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "oid0" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {}, + "sort": { + "uploadDate": -1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "oid0" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "_id": { + "$gt": { + "$oid": "000000000000000000000007" + } + } + }, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-reads.json b/test/unified-test-format/valid-pass/poc-retryable-reads.json new file mode 100644 index 0000000000..2b65d501a7 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-reads.json @@ -0,0 +1,433 @@ +{ + "description": "poc-retryable-reads", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryReads": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection1", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": 
[ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "name": "listDatabases", + "object": "client0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json new file mode 100644 index 0000000000..50160799f3 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -0,0 +1,482 @@ +{ + "description": "poc-retryable-writes", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + 
"failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + 
"runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-sessions.json b/test/unified-test-format/valid-pass/poc-sessions.json new file mode 100644 index 0000000000..75f3489428 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-sessions.json @@ -0,0 +1,466 @@ +{ + "description": "poc-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + 
"operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json new file mode 100644 index 0000000000..820ed65927 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -0,0 +1,505 @@ +{ + "description": "poc-transactions-convenient-api", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + }, + { + "session": { + "id": "session2", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + 
"$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "name": "withTransaction", + "object": "session2", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": 
{ + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json new file mode 100644 index 0000000000..a0b297d59a --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -0,0 +1,409 @@ +{ + "description": "poc-transactions-mongos-pin-auto", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "remain pinned after non-transient Interrupted error on insertOne", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorCodeName": "Interrupted" + } + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { 
+ "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$type": "object" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + 
"recoveryToken": { + "$$type": "object" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json new file mode 100644 index 0000000000..0355ca2060 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -0,0 +1,323 @@ +{ + "description": "poc-transactions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "assertSessionTransactionState", + "object": "testRunner", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "explicitly create collection using create command", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": 
false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a non-existing collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "session": "session0", + "name": "x_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py new file mode 100644 index 0000000000..68ce36e6fa --- /dev/null +++ b/test/unified_format.py @@ -0,0 +1,1833 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. 
+ +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +""" +from __future__ import annotations + +import binascii +import collections +import copy +import datetime +import functools +import os +import re +import sys +import time +import traceback +import types +from collections import abc +from test import ( + AWS_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, + IntegrationTest, + client_context, + unittest, +) +from test.utils import ( + CMAPListener, + camel_to_snake, + camel_to_snake_args, + get_pool, + parse_collection_options, + parse_spec_options, + prepare_spec_arguments, + rs_or_single_client, + single_client, + snake_to_camel, + wait_until, +) +from test.utils_spec_runner import SpecRunnerThread +from test.version import Version +from typing import Any, Dict, List, Mapping, Optional + +import pymongo +from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util +from bson.binary import Binary +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.objectid import ObjectId +from bson.regex import RE_TYPE, Regex +from gridfs import GridFSBucket, GridOut +from pymongo import ASCENDING, CursorType, MongoClient, _csot +from pymongo.change_stream import ChangeStream +from pymongo.client_session import ClientSession, TransactionOptions, _TxnState +from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor +from pymongo.database import Database +from pymongo.encryption import ClientEncryption +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + ConnectionFailure, + EncryptionError, + InvalidOperation, + NotPrimaryError, + PyMongoError, +) +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyEvent, + _CommandEvent, + _ConnectionEvent, + _PoolEvent, + _ServerEvent, + _ServerHeartbeatEvent, +) +from pymongo.operations import SearchIndexModel +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.results import BulkWriteResult +from pymongo.server_api import ServerApi +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address +from pymongo.write_concern import WriteConcern + +JSON_OPTS = json_util.JSONOptions(tz_aware=False) + +IS_INTERRUPTED = False + +KMS_TLS_OPTS = { + "kmip": { + "tlsCAFile": CA_PEM, + "tlsCertificateKeyFile": CLIENT_PEM, + } +} + + +# Build up a placeholder map. 
+PLACEHOLDER_MAP = {} +for provider_name, provider_data in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("aws", AWS_CREDS), + ("azure", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("kmip", KMIP_CREDS), +]: + for key, value in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. + + Vendored from six: https://github.com/benjaminp/six/blob/master/six.py + """ + + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d["__orig_bases__"] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) + + +def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get("topologies") + if req_topologies: + topology_satisfied = client_context.is_topology_type(req_topologies) + + server_version = Version(*client_context.version[:3]) + + min_version_satisfied = True + req_min_server_version = requirement.get("minServerVersion") + if req_min_server_version: + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version + + max_version_satisfied = True + req_max_server_version = requirement.get("maxServerVersion") + if req_max_server_version: + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version + + serverless = requirement.get("serverless") + if serverless == "require": + serverless_satisfied = client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + + params_satisfied = True + params = requirement.get("serverParameters") + if params: + for param, val in params.items(): + if param not in client_context.server_parameters: + params_satisfied = False + elif client_context.server_parameters[param] != val: + params_satisfied = False + + auth_satisfied = True + req_auth = requirement.get("auth") + if req_auth is not None: + if req_auth: + auth_satisfied = client_context.auth_enabled + else: + auth_satisfied = not client_context.auth_enabled + + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and serverless_satisfied + and params_satisfied + and auth_satisfied + and csfle_satisfied + ) + + +def parse_collection_or_database_options(options): + return parse_collection_options(options) + + +def parse_bulk_write_result(result): + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} + return { + "deletedCount": result.deleted_count, + "insertedCount": 
result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } + + +def parse_bulk_write_error_result(error): + write_result = BulkWriteResult(error.details, True) + return parse_bulk_write_result(write_result) + + +class NonLazyCursor: + """A find cursor proxy that creates the remote cursor when initialized.""" + + def __init__(self, find_cursor, client): + self.client = client + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = next(find_cursor, None) + + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + + def __next__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return next(self.find_cursor) + + # Added to support the iterateOnce operation. + try_next = __next__ + + def close(self): + self.find_cursor.close() + self.client = None + + +class EventListenerUtil(CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener): + def __init__( + self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map + ): + self._event_types = {name.lower() for name in observe_events} + if observe_sensitive_commands: + self._observe_sensitive_commands = True + self._ignore_commands = set(ignore_commands) + else: + self._observe_sensitive_commands = False + self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) + self._ignore_commands.add("configurefailpoint") + self._event_mapping = collections.defaultdict(list) + self.entity_map = entity_map + if store_events: + for i in store_events: + id = i["id"] + events = (i.lower() for i in i["events"]) + for i in events: + self._event_mapping[i].append(id) + self.entity_map[id] = [] + super().__init__() + + def get_events(self, event_type): + assert event_type in ("command", "cmap", "sdam", "all"), event_type + if event_type == "all": + return list(self.events) + if event_type == "command": + return [e for e in self.events if isinstance(e, _CommandEvent)] + if event_type == "cmap": + return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] + return [ + e + for e in self.events + if isinstance(e, (_ServerEvent, TopologyEvent, _ServerHeartbeatEvent)) + ] + + def add_event(self, event): + event_name = type(event).__name__.lower() + if event_name in self._event_types: + super().add_event(event) + for id in self._event_mapping[event_name]: + self.entity_map[id].append( + { + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event), + } + ) + + def _command_event(self, event): + if event.command_name.lower() not in self._ignore_commands: + self.add_event(event) + + def started(self, event): + if isinstance(event, CommandStartedEvent): + if event.command == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def succeeded(self, event): + if isinstance(event, CommandSucceededEvent): + if event.reply == {}: + # Command is redacted. Observe only if flag is set. 
+ if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def failed(self, event): + if isinstance(event, CommandFailedEvent): + self._command_event(event) + else: + self.add_event(event) + + def opened(self, event: ServerOpeningEvent) -> None: + self.add_event(event) + + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: ServerClosedEvent) -> None: + self.add_event(event) + + +class EntityMapUtil: + """Utility class that implements an entity map as per the unified + test format specification. + """ + + def __init__(self, test_class): + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class + + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self.test.fail(f"Could not find entity named {item} in map") + + def __setitem__(self, key, value): + if not isinstance(key, str): + self.test.fail("Expected entity name of type str, got %s" % (type(key))) + + if key in self._entities: + self.test.fail(f"Entity named {key} already in map") + + self._entities[key] = value + + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + + def _create_entity(self, entity_spec, uri=None): + if len(entity_spec) != 1: + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") + + entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") + if entity_type == "client": + kwargs: dict = {} + observe_events = spec.get("observeEvents", []) + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, + ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): + if client_context.load_balancer or client_context.serverless: + kwargs["h"] = client_context.MULTI_MONGOS_LB_URI + elif client_context.is_mongos: + kwargs["h"] = client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") + if server_api: + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) + if uri: + kwargs["h"] = uri + client = rs_or_single_client(**kwargs) + self[spec["id"]] = client + self.test.addCleanup(client.close) + return + elif entity_type == "database": + client = self[spec["client"]] + if not isinstance(client, MongoClient): + self.test.fail( + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) + ) + options = 
parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) + return + elif entity_type == "collection": + database = self[spec["database"]] + if not isinstance(database, Database): + self.test.fail( + "Expected entity {} to be of type Database, got {}".format( + spec["database"], type(database) + ) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) + return + elif entity_type == "session": + client = self[spec["client"]] + if not isinstance(client, MongoClient): + self.test.fail( + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts["default_transaction_options"] = txn_opts + session = client.start_session(**dict(opts)) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) + self.test.addCleanup(session.end_session) + return + elif entity_type == "bucket": + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + bucket = GridFSBucket(db, **kwargs) + + # PyMongo does not support GridFSBucket.drop(), emulate it. + @_csot.apply + def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: + self._files.drop(*args, **kwargs) + self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket + return + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + self[spec["id"]] = ClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", KMS_TLS_OPTS), + ) + return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self[name] = thread + return + + self.test.fail(f"Unable to create entity of unknown type {entity_type}") + + def create_entities_from_spec(self, entity_spec, uri=None): + for spec in entity_spec: + self._create_entity(spec, uri=uri) + + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: + client = self[client_name] + if not isinstance(client, MongoClient): + self.test.fail( + f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" + ) + + listener = self._listeners.get(client_name) + if not listener: + self.test.fail(f"No listeners configured for client {client_name}") + + return listener + + def get_lsid_for_session(self, session_name): + session = self[session_name] + if not isinstance(session, ClientSession): + self.test.fail( + f"Expected entity {session_name} to be of type ClientSession, got {type(session)}" + ) + + try: + return session.session_id + except InvalidOperation: + # session has been closed. 
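+            # Fall back to the lsid recorded at session creation time;
+            # session_id raises InvalidOperation once the session is ended.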
+ return self._session_lsids[session_name] + + +binary_types = (Binary, bytes) +long_types = (Int64,) +unicode_type = str + + +BSON_TYPE_ALIAS_MAP = { + # https://mongodb.com/docs/manual/reference/operator/query/type/ + # https://pymongo.readthedocs.io/en/stable/api/bson/index.html + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), +} + + +class MatchEvaluatorUtil: + """Utility class that implements methods for evaluating matches as per + the unified test format specification. + """ + + def __init__(self, test_class): + self.test = test_class + + def _operation_exists(self, spec, actual, key_to_compare): + if spec is True: + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) + elif spec is False: + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) + else: + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") + + def __type_alias_to_type(self, alias): + if alias not in BSON_TYPE_ALIAS_MAP: + self.test.fail(f"Unrecognized BSON type alias {alias}") + return BSON_TYPE_ALIAS_MAP[alias] + + def _operation_type(self, spec, actual, key_to_compare): + if isinstance(spec, abc.MutableSequence): + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) + else: + permissible_types = self.__type_alias_to_type(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertIsInstance(value, permissible_types) + + def _operation_matchesEntity(self, spec, actual, key_to_compare): + expected_entity = self.test.entity_map[spec] + self.test.assertEqual(expected_entity, actual[key_to_compare]) + + def _operation_matchesHexBytes(self, spec, actual, key_to_compare): + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) + + def _operation_unsetOrMatches(self, spec, actual, key_to_compare): + if key_to_compare is None and not actual: + # top-level document can be None when unset + return + + if key_to_compare not in actual: + # we add a dummy value for the compared key to pass map size check + actual[key_to_compare] = "dummyValue" + return + self.match_result(spec, actual[key_to_compare], in_recursive_call=True) + + def _operation_sessionLsid(self, spec, actual, key_to_compare): + expected_lsid = self.test.entity_map.get_lsid_for_session(spec) + self.test.assertEqual(expected_lsid, actual[key_to_compare]) + + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_{}".format(opname.strip("$")) + try: + method = getattr(self, method_name) + except AttributeError: + self.test.fail(f"Unsupported special matching operator {opname}") + else: + method(spec, actual, 
key_to_compare) + + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): + """Returns True if a special operation is evaluated, False + otherwise. If the ``expectation`` map contains a single key, + value pair we check it for a special operation. + If given, ``key_to_compare`` is assumed to be the key in + ``expectation`` whose corresponding value needs to be + evaluated for a possible special operation. ``key_to_compare`` + is ignored when ``expectation`` has only one key. + """ + if not isinstance(expectation, abc.Mapping): + return False + + is_special_op, opname, spec = False, False, False + + if key_to_compare is not None: + if key_to_compare.startswith("$$"): + is_special_op = True + opname = key_to_compare + spec = expectation[key_to_compare] + key_to_compare = None + else: + nested = expectation[key_to_compare] + if isinstance(nested, abc.Mapping) and len(nested) == 1: + opname, spec = next(iter(nested.items())) + if opname.startswith("$$"): + is_special_op = True + elif len(expectation) == 1: + opname, spec = next(iter(expectation.items())) + if opname.startswith("$$"): + is_special_op = True + key_to_compare = None + + if is_special_op: + self._evaluate_special_operation( + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) + return True + + return False + + def _match_document(self, expectation, actual, is_root): + if self._evaluate_if_special_operation(expectation, actual): + return + + self.test.assertIsInstance(actual, abc.Mapping) + for key, value in expectation.items(): + if self._evaluate_if_special_operation(expectation, actual, key): + continue + + self.test.assertIn(key, actual) + self.match_result(value, actual[key], in_recursive_call=True) + + if not is_root: + expected_keys = set(expectation.keys()) + for key, value in expectation.items(): + if value == {"$$exists": False}: + expected_keys.remove(key) + self.test.assertEqual(expected_keys, set(actual.keys())) + + def match_result(self, expectation, actual, in_recursive_call=False): + if isinstance(expectation, abc.Mapping): + return self._match_document(expectation, actual, is_root=not in_recursive_call) + + if isinstance(expectation, abc.MutableSequence): + self.test.assertIsInstance(actual, abc.MutableSequence) + for e, a in zip(expectation, actual): + if isinstance(e, abc.Mapping): + self._match_document(e, a, is_root=not in_recursive_call) + else: + self.match_result(e, a, in_recursive_call=True) + return None + + # account for flexible numerics in element-wise comparison + if isinstance(expectation, int) or isinstance(expectation, float): + self.test.assertEqual(expectation, actual) + return None + else: + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) + return None + + def assertHasDatabaseName(self, spec, actual): + if "databaseName" in spec: + self.test.assertEqual(spec["databaseName"], actual.database_name) + + def assertHasServiceId(self, spec, actual): + if "hasServiceId" in spec: + if spec.get("hasServiceId"): + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + if "type" in spec: + self.test.assertEqual(actual.server_type_name, spec["type"]) + if "error" in spec: + self.test.process_error(actual.error, spec["error"]) + if "minWireVersion" in spec: + self.test.assertEqual(actual.min_wire_version, 
spec["minWireVersion"]) + if "maxWireVersion" in spec: + self.test.assertEqual(actual.max_wire_version, spec["maxWireVersion"]) + if "topologyVersion" in spec: + self.test.assertEqual(actual.topology_version, spec["topologyVersion"]) + + def match_event(self, event_type, expectation, actual): + name, spec = next(iter(expectation.items())) + + # every command event has the commandName field + if event_type == "command": + command_name = spec.get("commandName") + if command_name: + self.test.assertEqual(command_name, actual.command_name) + + if name == "commandStartedEvent": + self.test.assertIsInstance(actual, CommandStartedEvent) + command = spec.get("command") + if command: + self.match_result(command, actual.command) + self.assertHasDatabaseName(spec, actual) + self.assertHasServiceId(spec, actual) + elif name == "commandSucceededEvent": + self.test.assertIsInstance(actual, CommandSucceededEvent) + reply = spec.get("reply") + if reply: + self.match_result(reply, actual.reply) + self.assertHasDatabaseName(spec, actual) + self.assertHasServiceId(spec, actual) + elif name == "commandFailedEvent": + self.test.assertIsInstance(actual, CommandFailedEvent) + self.assertHasServiceId(spec, actual) + self.assertHasDatabaseName(spec, actual) + elif name == "poolCreatedEvent": + self.test.assertIsInstance(actual, PoolCreatedEvent) + elif name == "poolReadyEvent": + self.test.assertIsInstance(actual, PoolReadyEvent) + elif name == "poolClearedEvent": + self.test.assertIsInstance(actual, PoolClearedEvent) + self.assertHasServiceId(spec, actual) + elif name == "poolClosedEvent": + self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == "connectionCreatedEvent": + self.test.assertIsInstance(actual, ConnectionCreatedEvent) + elif name == "connectionReadyEvent": + self.test.assertIsInstance(actual, ConnectionReadyEvent) + elif name == "connectionClosedEvent": + self.test.assertIsInstance(actual, ConnectionClosedEvent) + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckOutStartedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) + elif name == "connectionCheckOutFailedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckedOutEvent": + self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) + elif name == "connectionCheckedInEvent": + self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + if "previousDescription" in spec: + self.match_server_description( + actual.previous_description, spec["previousDescription"] + ) + if "newDescription" in spec: + self.match_server_description(actual.new_description, spec["newDescription"]) + elif name == "serverHeartbeatStartedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) + if "awaited" in spec: + self.test.assertEqual(actual.awaited, spec["awaited"]) + elif name == "serverHeartbeatSucceededEvent": + self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) + if "awaited" in spec: + self.test.assertEqual(actual.awaited, spec["awaited"]) + elif name == "serverHeartbeatFailedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) + if "awaited" in spec: + self.test.assertEqual(actual.awaited, spec["awaited"]) + else: + raise Exception(f"Unsupported event type {name}") + + 
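+# Illustrative sketch (not part of the runner): coerce_result below converts
+# driver result objects into the spec's expectResult shape, e.g.
+#   coerce_result("insertOne", InsertOneResult(5, acknowledged=True))
+#     -> {"insertedId": 5}
+#   coerce_result("insertMany", InsertManyResult([1, 2], acknowledged=True))
+#     -> {0: 1, 1: 2}
+# Unacknowledged results collapse to {"acknowledged": False}.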
+def coerce_result(opname, result):
+    """Convert a pymongo result into the spec's result format."""
+    if hasattr(result, "acknowledged") and not result.acknowledged:
+        return {"acknowledged": False}
+    if opname == "bulkWrite":
+        return parse_bulk_write_result(result)
+    if opname == "insertOne":
+        return {"insertedId": result.inserted_id}
+    if opname == "insertMany":
+        return dict(enumerate(result.inserted_ids))
+    if opname in ("deleteOne", "deleteMany"):
+        return {"deletedCount": result.deleted_count}
+    if opname in ("updateOne", "updateMany", "replaceOne"):
+        return {
+            "matchedCount": result.matched_count,
+            "modifiedCount": result.modified_count,
+            "upsertedCount": 0 if result.upserted_id is None else 1,
+        }
+    return result
+
+
+class UnifiedSpecTestMixinV1(IntegrationTest):
+    """Mixin class to run test cases from test specification files.
+
+    Assumes that tests conform to the `unified test format
+    `_.
+
+    The specification of the test suite currently being run is available
+    as the class attribute ``TEST_SPEC``.
+    """
+
+    SCHEMA_VERSION = Version.from_string("1.17")
+    RUN_ON_LOAD_BALANCER = True
+    RUN_ON_SERVERLESS = True
+    TEST_SPEC: Any
+
+    @staticmethod
+    def should_run_on(run_on_spec):
+        if not run_on_spec:
+            # Always run these tests.
+            return True
+
+        for req in run_on_spec:
+            if is_run_on_requirement_satisfied(req):
+                return True
+        return False
+
+    def insert_initial_data(self, initial_data):
+        for i, collection_data in enumerate(initial_data):
+            coll_name = collection_data["collectionName"]
+            db_name = collection_data["databaseName"]
+            opts = collection_data.get("createOptions", {})
+            documents = collection_data["documents"]
+
+            # Set up the collection with as few majority writes as possible.
+            db = self.client[db_name]
+            db.drop_collection(coll_name)
+            # Use a majority write concern only on the final write.
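+            # A w:majority write waits for all earlier oplog entries to
+            # replicate, so acknowledging the final write also confirms the
+            # preceding w:1 writes reached a majority.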
+ if i == len(initial_data) - 1: + wc = WriteConcern(w="majority") + else: + wc = WriteConcern(w=1) + if documents: + if opts: + db.create_collection(coll_name, **opts) + db.get_collection(coll_name, write_concern=wc).insert_many(documents) + else: + # Ensure collection exists + db.create_collection(coll_name, write_concern=wc, **opts) + + @classmethod + def setUpClass(cls): + # super call creates internal client cls.client + super().setUpClass() + # process file-level runOnRequirements + run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) + if not cls.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + + # add any special-casing for skipping tests here + if client_context.storage_engine == "mmapv1": + if "retryable-writes" in cls.TEST_SPEC["description"]: + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + + def setUp(self): + super().setUp() + # process schemaVersion + # note: we check major schema version during class generation + # note: we do this here because we cannot run assertions in setUpClass + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) + self.assertLessEqual( + version, + self.SCHEMA_VERSION, + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", + ) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + if client_context.storage_engine == "mmapv1": + if ( + "Dirty explicit session is discarded" in spec["description"] + or "Dirty implicit session is discarded" in spec["description"] + or "Cancel server check" in spec["description"] + ): + self.skipTest("MMAPv1 does not support retryWrites=True") + if "Client side error in command starting transaction" in spec["description"]: + self.skipTest("Implement PYTHON-1894") + if "timeoutMS applied to entire download" in spec["description"]: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + if "csot" in class_name: + if "gridfs" in class_name and sys.platform == "win32": + self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") + if client_context.storage_engine == "mmapv1": + self.skipTest( + "MMAPv1 does not support retryable writes which is required for CSOT tests" + ) + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if "tailable" in class_name: + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + + # Some tests need to be skipped based on the operations they try to run. 
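+        # (operations PyMongo does not implement, or ones that need server
+        # features unavailable in the current configuration)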
+ for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if client_context.storage_engine == "mmapv1": + if name == "createChangeStream": + self.skipTest("MMAPv1 does not support change streams") + if name == "withTransaction" or name == "startTransaction": + self.skipTest("MMAPv1 does not support document-level locking") + if not client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + + def process_error(self, exception, spec): + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + for k in error_response.keys(): + self.assertEqual(error_response[k], exception.details[k]) + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + # Connection errors are considered client errors. + if isinstance(exception, ConnectionFailure): + self.assertNotIsInstance(exception, NotPrimaryError) + elif isinstance(exception, (InvalidOperation, ConfigurationError, EncryptionError)): + pass + else: + self.assertNotIsInstance(exception, PyMongoError) + + if is_timeout_error: + self.assertIsInstance(exception, PyMongoError) + if not exception.timeout: + # Re-raise the exception for better diagnostics. 
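+                # (the original traceback is more useful than a bare
+                # assertion that exception.timeout is True)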
+ raise exception + + if error_contains: + if isinstance(exception, BulkWriteError): + errmsg = str(exception.details).lower() + else: + errmsg = str(exception).lower() + self.assertIn(error_contains.lower(), errmsg) + + if error_code: + self.assertEqual(error_code, exception.details.get("code")) + + if error_code_name: + self.assertEqual(error_code_name, exception.details.get("codeName")) + + if error_labels_contain: + labels = [ + err_label + for err_label in error_labels_contain + if exception.has_error_label(err_label) + ] + self.assertEqual(labels, error_labels_contain) + + if error_labels_omit: + for err_label in error_labels_omit: + if exception.has_error_label(err_label): + self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'") + + if expect_result: + if isinstance(exception, BulkWriteError): + result = parse_bulk_write_error_result(exception) + self.match_evaluator.match_result(expect_result, result) + else: + self.fail(f"expectResult can only be specified with {BulkWriteError} exceptions") + + def __raise_if_unsupported(self, opname, target, *target_types): + if not isinstance(target, target_types): + self.fail(f"Operation {opname} not supported for entity of type {type(target)}") + + def __entityOperation_createChangeStream(self, target, *args, **kwargs): + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support change streams") + self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection) + stream = target.watch(*args, **kwargs) + self.addCleanup(stream.close) + return stream + + def _clientOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream(target, *args, **kwargs) + + def _databaseOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream(target, *args, **kwargs) + + def _collectionOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream(target, *args, **kwargs) + + def _databaseOperation_runCommand(self, target, **kwargs): + self.__raise_if_unsupported("runCommand", target, Database) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + return target.command(**kwargs) + + def _databaseOperation_runCursorCommand(self, target, **kwargs): + return list(self._databaseOperation_createCommandCursor(target, **kwargs)) + + def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, Database) + # Ensure the first key is the command name. 
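+        # The server treats the first key of a command document as the
+        # command name, so it must come before any other options.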
+ ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + batch_size = 0 + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS") + + if "batch_size" in kwargs: + batch_size = kwargs.pop("batch_size") + + cursor = target.cursor_command(**kwargs) + + if batch_size > 0: + cursor.batch_size(batch_size) + + return cursor + + def _databaseOperation_listCollections(self, target, *args, **kwargs): + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} + cursor = target.list_collections(*args, **kwargs) + return list(cursor) + + def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + kwargs["check_exists"] = False + ret = target.create_collection(*args, **kwargs) + return ret + + def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported("aggregate", target, Database, Collection) + return list(target.aggregate(*args, **kwargs)) + + def _databaseOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, Collection) + find_cursor = target.find(*args, **kwargs) + return list(find_cursor) + + def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, Collection) + if "filter" not in kwargs: + self.fail('createFindCursor requires a "filter" argument') + cursor = NonLazyCursor(target.find(*args, **kwargs), target.database.client) + self.addCleanup(cursor.close) + return cursor + + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + + def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for list_indexes") + return list(target.list_indexes(*args, **kwargs)) + + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + + def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return target.create_search_indexes(models) + + def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return list(target.list_search_indexes(name, **agg_kwargs)) + + def _sessionOperation_withTransaction(self, target, *args, **kwargs): + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("withTransaction", target, ClientSession) + return target.with_transaction(*args, **kwargs) + + def _sessionOperation_startTransaction(self, target, 
*args, **kwargs): + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("startTransaction", target, ClientSession) + return target.start_transaction(*args, **kwargs) + + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) + return next(target) + + def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, CommandCursor + ) + while target.alive: + try: + return next(target) + except StopIteration: + pass + return None + + def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) + return target.close() + + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["master_key"] = opts.get("masterKey") + kwargs["key_alt_names"] = opts.get("keyAltNames") + kwargs["key_material"] = opts.get("keyMaterial") + return target.create_data_key(*args, **kwargs) + + def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return list(target.get_keys(*args, **kwargs)) + + def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["provider"] = opts.get("provider") + kwargs["master_key"] = opts.get("masterKey") + data = target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: + with target.open_download_stream(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_downloadByName( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + with target.open_download_stream_by_name(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_upload(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream(*args, **kwargs) + + def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream_with_id(*args, **kwargs) + + def _bucketOperation_find( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return list(target.find(*args, **kwargs)) + + def run_entity_operation(self, spec): + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") + if ignore and (expect_error or save_as_entity or 
expect_result): + raise ValueError( + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments( + spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations + ) + else: + arguments = {} + + if isinstance(target, MongoClient): + method_name = f"_clientOperation_{opname}" + elif isinstance(target, Database): + method_name = f"_databaseOperation_{opname}" + elif isinstance(target, Collection): + method_name = f"_collectionOperation_{opname}" + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") + elif isinstance(target, ChangeStream): + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, CommandCursor)): + method_name = f"_cursor_{opname}" + elif isinstance(target, ClientSession): + method_name = f"_sessionOperation_{opname}" + elif isinstance(target, GridFSBucket): + method_name = f"_bucketOperation_{opname}" + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) + elif isinstance(target, ClientEncryption): + method_name = f"_clientEncryptionOperation_{opname}" + else: + method_name = "doesNotExist" + + try: + method = getattr(self, method_name) + except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" + try: + cmd = getattr(target, target_opname) + except AttributeError: + self.fail(f"Unsupported operation {opname} on entity {target}") + else: + cmd = functools.partial(method, target) + + try: + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments: + timeout = arguments.pop("timeout") + with pymongo.timeout(timeout): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) + except Exception as exc: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. 
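+            # (ignoreResultAndError only suppresses driver errors; anything
+            # else propagates and fails the test)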
+            if ignore and isinstance(exc, (PyMongoError,)):
+                return None
+            if expect_error:
+                return self.process_error(exc, expect_error)
+            raise
+        else:
+            if expect_error:
+                self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}')
+
+        if expect_result:
+            actual = coerce_result(opname, result)
+            self.match_evaluator.match_result(expect_result, actual)
+
+        if save_as_entity:
+            self.entity_map[save_as_entity] = result
+            return None
+        return None
+
+    def __set_fail_point(self, client, command_args):
+        if not client_context.test_commands_enabled:
+            self.skipTest("Test commands must be enabled")
+
+        cmd_on = SON([("configureFailPoint", "failCommand")])
+        cmd_on.update(command_args)
+        client.admin.command(cmd_on)
+        self.addCleanup(
+            client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off"
+        )
+
+    def _testOperation_failPoint(self, spec):
+        self.__set_fail_point(
+            client=self.entity_map[spec["client"]], command_args=spec["failPoint"]
+        )
+
+    def _testOperation_targetedFailPoint(self, spec):
+        session = self.entity_map[spec["session"]]
+        if not session._pinned_address:
+            self.fail(
+                "Cannot use targetedFailPoint operation with unpinned "
+                "session {}".format(spec["session"])
+            )
+
+        client = single_client("{}:{}".format(*session._pinned_address))
+        self.addCleanup(client.close)
+        self.__set_fail_point(client=client, command_args=spec["failPoint"])
+
+    def _testOperation_createEntities(self, spec):
+        self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri)
+
+    def _testOperation_assertSessionTransactionState(self, spec):
+        session = self.entity_map[spec["session"]]
+        expected_state = getattr(_TxnState, spec["state"].upper())
+        self.assertEqual(expected_state, session._transaction.state)
+
+    def _testOperation_assertSessionPinned(self, spec):
+        session = self.entity_map[spec["session"]]
+        self.assertIsNotNone(session._transaction.pinned_address)
+
+    def _testOperation_assertSessionUnpinned(self, spec):
+        session = self.entity_map[spec["session"]]
+        self.assertIsNone(session._pinned_address)
+        self.assertIsNone(session._transaction.pinned_address)
+
+    def __get_last_two_command_lsids(self, listener):
+        cmd_started_events = []
+        for event in reversed(listener.events):
+            if isinstance(event, CommandStartedEvent):
+                cmd_started_events.append(event)
+        if len(cmd_started_events) < 2:
+            self.fail(
+                "Needed 2 CommandStartedEvents to compare lsids, "
+                "got %s" % (len(cmd_started_events))
+            )
+        return tuple([e.command["lsid"] for e in cmd_started_events][:2])
+
+    def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec):
+        listener = self.entity_map.get_listener_for_client(spec["client"])
+        self.assertNotEqual(*self.__get_last_two_command_lsids(listener))
+
+    def _testOperation_assertSameLsidOnLastTwoCommands(self, spec):
+        listener = self.entity_map.get_listener_for_client(spec["client"])
+        self.assertEqual(*self.__get_last_two_command_lsids(listener))
+
+    def _testOperation_assertSessionDirty(self, spec):
+        session = self.entity_map[spec["session"]]
+        self.assertTrue(session._server_session.dirty)
+
+    def _testOperation_assertSessionNotDirty(self, spec):
+        session = self.entity_map[spec["session"]]
+        return self.assertFalse(session._server_session.dirty)
+
+    def _testOperation_assertCollectionExists(self, spec):
+        database_name = spec["databaseName"]
+        collection_name = spec["collectionName"]
+        collection_name_list = list(self.client.get_database(database_name).list_collection_names())
+        self.assertIn(collection_name,
collection_name_list) + + def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) + self.assertNotIn(collection_name, collection_name_list) + + def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) + + def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + for index in collection.list_indexes(): + self.assertNotEqual(spec["indexName"], index["name"]) + + def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec["client"]] + pool = get_pool(client) + self.assertEqual(spec["connections"], pool.active_sockets) + + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event("all", event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. 
+ """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + time.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + def primary_changed() -> bool: + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + + def _testOperation_loop(self, spec): + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 + i = 0 + global IS_INTERRUPTED + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + for op in spec["operations"]: + self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + except Exception as exc: + if isinstance(exc, AssertionError): + key = failure_key or error_key + else: + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) + + def run_special_operation(self, spec): + opname = spec["name"] + method_name = f"_testOperation_{opname}" + try: + method = getattr(self, method_name) + except AttributeError: + self.fail(f"Unsupported special test operation {opname}") + else: + method(spec["arguments"]) + + def run_operations(self, spec): + for op in spec: + if op["object"] == "testRunner": + 
self.run_special_operation(op) + else: + self.run_entity_operation(op) + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec["client"] + events = event_spec["events"] + event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + + if len(events) == 0: + self.assertEqual(actual_events, []) + continue + + self.assertEqual(len(actual_events), len(events), actual_events) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) + + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + + def verify_outcome(self, spec): + for collection_data in spec: + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + expected_documents = collection_data["documents"] + + coll = self.client.get_database(db_name).get_collection( + coll_name, + read_preference=ReadPreference.PRIMARY, + read_concern=ReadConcern(level="local"), + ) + + if expected_documents: + sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) + actual_documents = list(coll.find({}, sort=[("_id", ASCENDING)])) + self.assertListEqual(sorted_expected_documents, actual_documents) + + def run_scenario(self, spec, uri=None): + if "csot" in self.id().lower(): + # Retry CSOT tests up to 2 times to deal with flakey tests. 
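+            # (three attempts in total: the initial run plus two retries)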
+ attempts = 3 + for i in range(attempts): + try: + return self._run_scenario(spec, uri) + except AssertionError: + if i < attempts - 1: + print( + f"Retrying after attempt {i+1} of {self.id()} failed with:\n" + f"{traceback.format_exc()}", + file=sys.stderr, + ) + self.setUp() + continue + raise + return None + else: + self._run_scenario(spec, uri) + return None + + def _run_scenario(self, spec, uri=None): + # maybe skip test manually + self.maybe_skip_test(spec) + + # process test-level runOnRequirements + run_on_spec = spec.get("runOnRequirements", []) + if not self.should_run_on(run_on_spec): + raise unittest.SkipTest("runOnRequirements not satisfied") + + # process skipReason + skip_reason = spec.get("skipReason", None) + if skip_reason is not None: + raise unittest.SkipTest(f"{skip_reason}") + + # process createEntities + self._uri = uri + self.entity_map = EntityMapUtil(self) + self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) + # process initialData + self.insert_initial_data(self.TEST_SPEC.get("initialData", [])) + + # process operations + self.run_operations(spec["operations"]) + + # process expectEvents + if "expectEvents" in spec: + expect_events = spec["expectEvents"] + self.assertTrue(expect_events, "expectEvents must be non-empty") + self.check_events(expect_events) + + # process outcome + self.verify_outcome(spec.get("outcome", [])) + + +class UnifiedSpecTestMeta(type): + """Metaclass for generating test classes.""" + + TEST_SPEC: Any + EXPECTED_FAILURES: Any + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + def create_test(spec): + def test_case(self): + self.run_scenario(spec) + + return test_case + + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} + + +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], # noqa: B006 + bypass_test_generation_errors=False, + **kwargs, +): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects. + """ + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked. + """ + + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + + return SpecTestBase + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. 
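+                # (the driver's DEFAULT_CODEC_OPTIONS decodes BSON datetimes
+                # as naive values, so expected values must be naive too)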
+ opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = "Test{}_{}_{}".format( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) + if mixin_class is None: + raise ValueError( + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" + ) + module_dict = {"__module__": module} + module_dict.update(kwargs) + test_klasses[class_name] = type( + class_name, + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/uri_options/auth-options.json b/test/uri_options/auth-options.json index 65a168b334..fadbac35d2 100644 --- a/test/uri_options/auth-options.json +++ b/test/uri_options/auth-options.json @@ -1,7 +1,7 @@ { "tests": [ { - "description": "Valid auth options are parsed correctly", + "description": "Valid auth options are parsed correctly (GSSAPI)", "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authSource=$external", "valid": true, "warning": false, @@ -15,6 +15,18 @@ }, "authSource": "$external" } + }, + { + "description": "Valid auth options are parsed correctly (SCRAM-SHA-1)", + "uri": "mongodb://foo:bar@example.com/?authMechanism=SCRAM-SHA-1&authSource=authSourceDB", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "authMechanism": "SCRAM-SHA-1", + "authSource": "authSourceDB" + } } ] } diff --git a/test/uri_options/concern-options.json b/test/uri_options/concern-options.json index 2b3783746c..5a8ef6c272 100644 --- a/test/uri_options/concern-options.json +++ b/test/uri_options/concern-options.json @@ -36,15 +36,6 @@ "w": "arbitraryButStillValid" } }, - { - "description": "Too low w causes a warning", - "uri": "mongodb://example.com/?w=-2", - "valid": true, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - }, { "description": "Non-numeric wTimeoutMS causes a warning", "uri": "mongodb://example.com/?wTimeoutMS=invalid", diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index 1e2dccd6e2..b2669b6cf1 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid connection and timeout options are parsed correctly", - "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500", + "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500&timeoutMS=100", "valid": true, "warning": false, "hosts": null, @@ -16,7 +16,8 @@ "replicaSet": "uri-options-spec", "retryWrites": true, "serverSelectionTimeoutMS": 15000, - "socketTimeoutMS": 7500 + "socketTimeoutMS": 7500, + "timeoutMS": 100 } }, { @@ -117,6 +118,156 @@ 
"hosts": null, "auth": null, "options": {} + }, + { + "description": "directConnection=true", + "uri": "mongodb://example.com/?directConnection=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": true + } + }, + { + "description": "directConnection=true with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "directConnection=false", + "uri": "mongodb://example.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "directConnection=false with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "Invalid directConnection value", + "uri": "mongodb://example.com/?directConnection=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true", + "uri": "mongodb://example.com/?loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true + } + }, + { + "description": "loadBalanced=true with directConnection=false", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "directConnection": false + } + }, + { + "description": "loadBalanced=false", + "uri": "mongodb://example.com/?loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false + } + }, + { + "description": "Invalid loadBalanced value", + "uri": "mongodb://example.com/?loadBalanced=1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with multiple hosts causes an error", + "uri": "mongodb://example1,example2/?loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with directConnection=true causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with replicaSet causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&replicaSet=replset", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "timeoutMS=0", + "uri": "mongodb://example.com/?timeoutMS=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "timeoutMS": 0 + } + }, + { + "description": "Non-numeric timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "Too low timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index 
be401f55d5..118b2f6783 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -2,13 +2,16 @@ "tests": [ { "description": "Valid connection pool options are parsed correctly", - "uri": "mongodb://example.com/?maxIdleTimeMS=50000", + "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3&maxConnecting=1", "valid": true, "warning": false, "hosts": null, "auth": null, "options": { - "maxIdleTimeMS": 50000 + "maxIdleTimeMS": 50000, + "maxPoolSize": 5, + "minPoolSize": 3, + "maxConnecting": 1 } }, { @@ -28,6 +31,46 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "maxPoolSize=0 does not error", + "uri": "mongodb://example.com/?maxPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "maxPoolSize": 0 + } + }, + { + "description": "minPoolSize=0 does not error", + "uri": "mongodb://example.com/?minPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "minPoolSize": 0 + } + }, + { + "description": "maxConnecting=0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=0", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "maxConnecting<0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json index e62ce4fa75..cdac6a63c3 100644 --- a/test/uri_options/read-preference-options.json +++ b/test/uri_options/read-preference-options.json @@ -21,6 +21,21 @@ "maxStalenessSeconds": 120 } }, + { + "description": "Single readPreferenceTags is parsed as array of size one", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:ny", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "ny" + } + ] + } + }, { "description": "Invalid readPreferenceTags causes a warning", "uri": "mongodb://example.com/?readPreferenceTags=invalid", diff --git a/test/uri_options/sdam-options.json b/test/uri_options/sdam-options.json new file mode 100644 index 0000000000..673f5607ee --- /dev/null +++ b/test/uri_options/sdam-options.json @@ -0,0 +1,46 @@ +{ + "tests": [ + { + "description": "serverMonitoringMode=auto", + "uri": "mongodb://example.com/?serverMonitoringMode=auto", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "auto" + } + }, + { + "description": "serverMonitoringMode=stream", + "uri": "mongodb://example.com/?serverMonitoringMode=stream", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "stream" + } + }, + { + "description": "serverMonitoringMode=poll", + "uri": "mongodb://example.com/?serverMonitoringMode=poll", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "poll" + } + }, + { + "description": "invalid serverMonitoringMode", + "uri": "mongodb://example.com/?serverMonitoringMode=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + } + ] +} diff --git a/test/uri_options/srv-options.json b/test/uri_options/srv-options.json new file mode 100644 index 0000000000..ffc356f12f --- /dev/null +++ b/test/uri_options/srv-options.json 
@@ -0,0 +1,116 @@ +{ + "tests": [ + { + "description": "SRV URI with custom srvServiceName", + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvServiceName": "customname" + } + }, + { + "description": "Non-SRV URI with custom srvServiceName", + "uri": "mongodb://example.com/?srvServiceName=customname", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with negative integer for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with invalid type for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "Non-SRV URI with srvMaxHosts", + "uri": "mongodb://example.com/?srvMaxHosts=2", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&replicaSet=foo", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=true", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=false", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false, + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0&replicaSet=foo", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "replicaSet": "foo", + "srvMaxHosts": 0 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and loadBalanced=true", + "uri": "mongodb+srv://test3.test.build.10gen.cc/?srvMaxHosts=0&loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "srvMaxHosts": 0 + } + } + ] +} diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index 6db80ed623..8beaaddd86 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid required tls options are parsed correctly", - "uri": "mongodb://example.com/?tls=true&tlsCAFile=ca.pem&tlsCertificateKeyFile=cert.pem&tlsCertificateKeyFilePassword=hunter2", + "uri": "mongodb://example.com/?tls=true&tlsCAFile=ca.pem&tlsCertificateKeyFile=cert.pem", "valid": true, "warning": false, "hosts": null, @@ -10,7 +10,17 @@ "options": { "tls": true, "tlsCAFile": "ca.pem", - "tlsCertificateKeyFile": "cert.pem", + "tlsCertificateKeyFile": "cert.pem" + } + }, + { + "description": "Valid 
tlsCertificateKeyFilePassword is parsed correctly", + "uri": "mongodb://example.com/?tlsCertificateKeyFilePassword=hunter2", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { "tlsCertificateKeyFilePassword": "hunter2" } }, @@ -34,15 +44,6 @@ "tlsAllowInvalidCertificates": true } }, - { - "description": "Invalid tlsAllowInvalidCertificates causes a warning", - "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=invalid", - "valid": true, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - }, { "description": "tlsAllowInvalidHostnames is parsed correctly", "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=true", @@ -75,8 +76,8 @@ } }, { - "description": "Invalid tlsAllowInsecure causes a warning", - "uri": "mongodb://example.com/?tlsAllowInsecure=invalid", + "description": "Invalid tlsInsecure causes a warning", + "uri": "mongodb://example.com/?tlsInsecure=invalid", "valid": true, "warning": true, "hosts": null, @@ -226,6 +227,414 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": true + } + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": false + } + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "uri": 
"mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + 
"options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": true + } + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": false + } + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=false and tlsDisableOCSPEndpointCheck=true 
raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsAllowInvalidCertificates=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and 
tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git a/test/utils.py b/test/utils.py index 81c57b5aab..c8f9197c64 100644 --- a/test/utils.py +++ b/test/utils.py @@ -12,63 +12,60 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for testing pymongo -""" +"""Utilities for testing pymongo""" +from __future__ import annotations -import collections import contextlib +import copy import functools import os import re +import shutil import sys import threading import time +import unittest import warnings - -from collections import defaultdict +from collections import abc, defaultdict from functools import partial +from test import client_context, db_pwd, db_user +from typing import Any, List -from bson import json_util, py3compat +from bson import json_util from bson.objectid import ObjectId - -from pymongo import (MongoClient, - monitoring, read_preferences) +from bson.son import SON +from pymongo import MongoClient, monitoring, operations, read_preferences +from pymongo.collection import ReturnDocument +from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener -from pymongo.pool import PoolOptions +from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.uri_parser import parse_uri from pymongo.write_concern import WriteConcern -from test import (client_context, - db_user, - db_pwd) - -IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000) - - -class WhiteListEventListener(monitoring.CommandListener): - - def __init__(self, *commands): - self.commands = set(commands) - self.results = defaultdict(list) - - def started(self, event): - if event.command_name in self.commands: - self.results['started'].append(event) - - def succeeded(self, event): - if event.command_name in self.commands: - self.results['succeeded'].append(event) - - def failed(self, event): - if event.command_name in self.commands: - self.results['failed'].append(event) +IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) -class CMAPListener(ConnectionPoolListener): +class BaseListener: def __init__(self): self.events = [] @@ -79,80 +76,164 @@ def add_event(self, event): self.events.append(event) def 
event_count(self, event_type): - return len([event for event in self.events[:] - if isinstance(event, event_type)]) + return len(self.events_by_type(event_type)) + + def events_by_type(self, event_type): + """Return the matching events by event class. + + event_type can be a single class or a tuple of classes. + """ + return self.matching(lambda e: isinstance(e, event_type)) + def matching(self, matcher): + """Return the matching events.""" + return [event for event in self.events[:] if matcher(event)] + + def wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") + + +class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) self.add_event(event) def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) self.add_event(event) def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) self.add_event(event) def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) self.add_event(event) def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) self.add_event(event) def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) self.add_event(event) def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) self.add_event(event) def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) + self.add_event(event) + + def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) self.add_event(event) def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) self.add_event(event) def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) self.add_event(event) -class EventListener(monitoring.CommandListener): - +class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): + super().__init__() self.results = defaultdict(list) - def started(self, event): - self.results['started'].append(event) + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] - def succeeded(self, event): - self.results['succeeded'].append(event) + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] - def failed(self, event): - self.results['failed'].append(event) + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] + + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) - def started_command_names(self): + def started_command_names(self) -> List[str]: """Return list of command names started.""" - return [event.command_name for event in self.results['started']] + return [event.command_name for event in self.started_events] + + def reset(self) -> None: + """Reset the state of this listener.""" + self.results.clear() + super().reset() + + +class TopologyEventListener(monitoring.TopologyListener): + def 
__init__(self): + self.results = defaultdict(list) + + def closed(self, event): + self.results["closed"].append(event) + + def description_changed(self, event): + self.results["description_changed"].append(event) + + def opened(self, event): + self.results["opened"].append(event) def reset(self): """Reset the state of this listener.""" self.results.clear() +class AllowListEventListener(EventListener): + def __init__(self, *commands): + self.commands = set(commands) + super().__init__() + + def started(self, event): + if event.command_name in self.commands: + super().started(event) + + def succeeded(self, event): + if event.command_name in self.commands: + super().succeeded(event) + + def failed(self, event): + if event.command_name in self.commands: + super().failed(event) + + class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" + + ignore_list_collections = False + def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).started(event) + super().started(event) def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).failed(event) + super().failed(event) -class ServerAndTopologyEventListener(monitoring.ServerListener, - monitoring.TopologyListener): +class _ServerEventListener: """Listens to all events.""" def __init__(self): @@ -167,25 +248,44 @@ def description_changed(self, event): def closed(self, event): self.results.append(event) + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] -class HeartbeatEventListener(monitoring.ServerHeartbeatListener): - """Listens to only server heartbeat events.""" - - def __init__(self): + def reset(self): self.results = [] + +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): + """Listens to Server events.""" + + +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): + """Listens to Server and Topology events.""" + + +class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): + """Listens to only server heartbeat events.""" + def started(self, event): - self.results.append(event) + self.add_event(event) def succeeded(self, event): - self.results.append(event) + self.add_event(event) def failed(self, event): - self.results.append(event) + self.add_event(event) -class MockSocketInfo(object): - def close(self): +class MockConnection: + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + + def close_conn(self, reason): pass def __enter__(self): @@ -195,23 +295,34 @@ def __exit__(self, exc_type, exc_val, exc_tb): pass -class MockPool(object): - def __init__(self, *args, **kwargs): - self.pool_id = 0 - self._lock = threading.Lock() - self.opts = PoolOptions() +class MockPool: + def __init__(self, address, options, handshake=True): + self.gen = _PoolGeneration() + self._lock = _create_lock() + self.opts = options + self.operation_count = 0 + self.conns = [] - def get_socket(self, all_credentials): - return MockSocketInfo() + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) - def return_socket(self, *args, **kwargs): + def checkout(self, 
handler=None): + return MockConnection() + + def checkin(self, *args, **kwargs): pass - def _reset(self): + def _reset(self, service_id=None): with self._lock: - self.pool_id += 1 + self.gen.inc(service_id) - def reset(self): + def ready(self): + pass + + def reset(self, service_id=None): + self._reset() + + def reset_without_pause(self): self._reset() def close(self): @@ -220,19 +331,20 @@ def close(self): def update_is_writable(self, is_writable): pass - def remove_stale_sockets(self, reference_pool_id): + def remove_stale_sockets(self, *args, **kwargs): pass class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" + def __init__(self, data): def convert(v): - if isinstance(v, collections.Mapping): + if isinstance(v, abc.Mapping): return ScenarioDict(v) - if isinstance(v, (py3compat.string_type, bytes)): + if isinstance(v, (str, bytes)): return v - if isinstance(v, collections.Sequence): + if isinstance(v, abc.Sequence): return [convert(item) for item in v] return v @@ -246,21 +358,19 @@ def __getitem__(self, item): return ScenarioDict({}) -class CompareType(object): - """Class that compares equal to any object of the given type.""" - def __init__(self, type): - self.type = type +class CompareType: + """Class that compares equal to any object of the given type(s).""" - def __eq__(self, other): - return isinstance(other, self.type) + def __init__(self, types): + self.types = types - def __ne__(self, other): - """Needed for Python 2.""" - return not self.__eq__(other) + def __eq__(self, other): + return isinstance(other, self.types) -class FunctionCallRecorder(object): +class FunctionCallRecorder: """Utility class to wrap a callable and record its invocations.""" + def __init__(self, function): self._function = function self._call_list = [] @@ -283,8 +393,9 @@ def call_count(self): return len(self._call_list) -class TestCreator(object): +class SpecTestCreator: """Class to create test cases from specifications.""" + def __init__(self, create_test, test_class, test_path): """Create a SpecTestCreator object. @@ -298,74 +409,105 @@ def __init__(self, create_test, test_class, test_path): test case. - `test_path`: path to the directory containing the JSON files with the test specifications. - """ + """ self._create_test = create_test self._test_class = test_class self.test_path = test_path def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a - test case.
+ """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) - if 'maxServerVersion' in scenario_def: - max_ver = tuple( - int(elt) for - elt in scenario_def['maxServerVersion'].split('.')) + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) + if "serverless" in scenario_def: + serverless = scenario_def["serverless"] + if serverless == "require": + serverless_satisfied = client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + method = unittest.skipUnless( + serverless_satisfied, "Serverless requirement not satisfied" + )(method) + return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( - run_on_req.get('topology', ['single', 'replicaset', 'sharded'])) + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) @staticmethod def min_server_version(run_on_req): - version = run_on_req.get('minServerVersion') + version = run_on_req.get("minServerVersion") if version: - min_ver = tuple(int(elt) for elt in version.split('.')) + min_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): - version = run_on_req.get('maxServerVersion') + version = run_on_req.get("maxServerVersion") if version: - max_ver = tuple(int(elt) for elt in version.split('.')) + max_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version <= max_ver return True + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return client_context.auth_enabled + return not client_context.auth_enabled + return True + + @staticmethod + def serverless_ok(run_on_req): + serverless = run_on_req["serverless"] + if serverless == "require": + return client_context.serverless + elif serverless == "forbid": + return not client_context.serverless + else: # unset or "allow" + return True + def should_run_on(self, scenario_def): - run_on = scenario_def.get('runOn', []) + run_on = scenario_def.get("runOn", []) if not run_on: # Always run these tests. return True for req in run_on: - if (self.valid_topology(req) and - self.min_server_version(req) and - self.max_server_version(req)): + if ( + self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + and self.serverless_ok(req) + ): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( - lambda: self.should_run_on(scenario_def), - "runOn not satisfied", - method) + lambda: self.should_run_on(scenario_def), "runOn not satisfied", method + ) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" - return scenario_def['tests'] + return scenario_def["tests"] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): @@ -377,75 +519,81 @@ def create_tests(self): # dates. 
opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( - json_util.loads(scenario_stream.read(), - json_options=opts)) + json_util.loads(scenario_stream.read(), json_options=opts) + ) test_type = os.path.splitext(filename)[0] # Construct test from scenario. for test_def in self.tests(scenario_def): - test_name = 'test_%s_%s_%s' % ( + test_name = "test_{}_{}_{}".format( dirname, - test_type.replace("-", "_").replace('.', '_'), - str(test_def['description'].replace(" ", "_").replace( - '.', '_'))) + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) - new_test = self._create_test( - scenario_def, test_def, test_name) - new_test = self._ensure_min_max_server_version( - scenario_def, new_test) - new_test = self.ensure_run_on( - scenario_def, new_test) + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) -def _connection_string(h, authenticate): - if h.startswith("mongodb://"): +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): return h - elif client_context.auth_enabled and authenticate: - return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h)) - else: - return "mongodb://%s" % (str(h),) + return f"mongodb://{h!s}" -def _mongo_client(host, port, authenticate=True, direct=False, **kwargs): +def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port - client_options = client_context.default_client_options.copy() - if client_context.replica_set_name and not direct: - client_options['replicaSet'] = client_context.replica_set_name + client_options: dict = client_context.default_client_options.copy() + if client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection client_options.update(kwargs) - client = MongoClient(_connection_string(host, authenticate), port, - **client_options) + uri = _connection_string(host) + if client_context.auth_enabled and authenticate: + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd - return client + return MongoClient(uri, port, **client_options) -def single_client_noauth(h=None, p=None, **kwargs): +def single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection. 
Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, direct=True, **kwargs) + return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) -def single_client(h=None, p=None, **kwargs): +def single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection, and authenticate if necessary.""" - return _mongo_client(h, p, direct=True, **kwargs) + return _mongo_client(h, p, directConnection=True, **kwargs) -def rs_client_noauth(h=None, p=None, **kwargs): +def rs_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_client(h=None, p=None, **kwargs): +def rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) -def rs_or_single_client_noauth(h=None, p=None, **kwargs): +def rs_or_single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. @@ -453,7 +601,7 @@ def rs_or_single_client_noauth(h=None, p=None, **kwargs): return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_or_single_client(h=None, p=None, **kwargs): +def rs_or_single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. @@ -461,7 +609,7 @@ def rs_or_single_client(h=None, p=None, **kwargs): return _mongo_client(h, p, **kwargs) -def ensure_all_connected(client): +def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. @@ -469,19 +617,30 @@ def ensure_all_connected(client): Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ - ismaster = client.admin.command("isMaster") - if 'setName' not in ismaster: + hello: dict = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(ismaster['hosts']) - connected_host_list = set([ismaster['me']]) - admindb = client.get_database('admin') + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} - # Run isMaster until we have connected to each host at least once. - while connected_host_list != target_host_list: - ismaster = admindb.command("isMaster", - read_preference=ReadPreference.SECONDARY) - connected_host_list.update([ismaster["me"]]) + # Run hello until we have connected to each host at least once. 
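+ # A single hello may reach only a subset of members, so poll up to 100 rounds per attempt until every host in the set has been seen.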
+ def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + wait_until(lambda: target_host_list == discover(), "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) def one(s): @@ -497,19 +656,19 @@ def oid_generated_on_process(oid): def delay(sec): - return '''function() { sleep(%f * 1000); return true; }''' % sec + return """function() { sleep(%f * 1000); return true; }""" % sec def get_command_line(client): - command_line = client.admin.command('getCmdLineOpts') - assert command_line['ok'] == 1, "getCmdLineOpts() failed" + command_line = client.admin.command("getCmdLineOpts") + assert command_line["ok"] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def camel_to_upper_camel(camel): @@ -523,18 +682,23 @@ def camel_to_snake_args(arguments): return arguments +def snake_to_camel(snake): + # Regex to convert snake_case to lowerCamelCase. + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) + + def parse_collection_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) + if "timeoutMS" in opts: + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 return opts @@ -546,11 +710,11 @@ def server_started_with_option(client, cmdline_opt, config_opt): - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] if config_opt in parsed: return parsed[config_opt] - argv = command_line['argv'] + argv = command_line["argv"] return cmdline_opt in argv @@ -558,60 +722,38 @@ def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: - msg = e.details.get('errmsg', '') - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. 
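+ # getCmdLineOpts itself failing with an authorization error is evidence that the server was started with auth enabled.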
return True raise # MongoDB >= 2.0 - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return security.get('auth', False) or bool(security.get('keyFile')) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = command_line['argv'] - return '--auth' in argv or '--keyFile' in argv - - -def server_started_with_nojournal(client): - command_line = get_command_line(client) - - # MongoDB 2.6. - if 'parsed' in command_line: - parsed = command_line['parsed'] - if 'storage' in parsed: - storage = parsed['storage'] - if 'journal' in storage: - return not storage['journal']['enabled'] - - return server_started_with_option(client, '--nojournal', 'nojournal') - - -def server_is_master_with_slave(client): - command_line = get_command_line(client) - if 'parsed' in command_line: - return command_line['parsed'].get('master', False) - return '--master' in command_line['argv'] + argv = command_line["argv"] + return "--auth" in argv or "--keyFile" in argv def drop_collections(db): # Drop all non-system collections in this database. - for coll in db.list_collection_names( - filter={"name": {"$regex": r"^(?!system\.)"}}): + for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): - db.command("dropAllUsersFromDatabase", 1, - writeConcern={"w": client_context.w}) + db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): @@ -624,10 +766,10 @@ def joinall(threads): def connected(client): """Convenience to wait for a newly-constructed client to connect.""" with warnings.catch_warnings(): - # Ignore warning that "ismaster" is always routed to primary even + # Ignore warning that ping is always routed to primary even # if client's read preference isn't PRIMARY. warnings.simplefilter("ignore", UserWarning) - client.admin.command('ismaster') # Force connection. + client.admin.command("ping") # Force connection. return client @@ -646,7 +788,7 @@ def wait_until(predicate, success_description, timeout=10): Returns the predicate's first true value. """ start = time.time() - interval = min(float(timeout)/100, 0.1) + interval = min(float(timeout) / 100, 0.1) while True: retval = predicate() if retval: @@ -658,9 +800,19 @@ def wait_until(predicate, success_description, timeout=10): time.sleep(interval) +def repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. 
+ client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + client.admin.command(cmd) + + def is_mongos(client): - res = client.admin.command('ismaster') - return res.get('msg', '') == 'isdbgrid' + res = client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" def assertRaisesExactly(cls, fn, *args, **kwargs): @@ -672,8 +824,7 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): try: fn(*args, **kwargs) except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % ( - e.__class__.__name__, cls.__name__) + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" else: raise AssertionError("%s not raised" % cls) @@ -688,6 +839,7 @@ def _ignore_deprecations(): def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: + @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): @@ -699,8 +851,7 @@ def wrapper(*args, **kwargs): return _ignore_deprecations() -class DeprecationFilter(object): - +class DeprecationFilter: def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() @@ -709,8 +860,8 @@ def __init__(self, action="ignore"): def stop(self): """Stop filtering deprecations.""" - self.warn_context.__exit__() - self.warn_context = None + self.warn_context.__exit__() # type: ignore + self.warn_context = None # type: ignore def get_pool(client): @@ -722,9 +873,7 @@ def get_pool(client): def get_pools(client): """Get all pools.""" - return [ - server.pool for server in - client._get_topology().select_servers(any_server_selector)] + return [server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. @@ -753,23 +902,13 @@ def run_threads(collection, target): @contextlib.contextmanager def frequent_thread_switches(): """Make concurrency bugs more likely to manifest.""" - interval = None - if not sys.platform.startswith('java'): - if hasattr(sys, 'getswitchinterval'): - interval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) - else: - interval = sys.getcheckinterval() - sys.setcheckinterval(1) + interval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) try: yield finally: - if not sys.platform.startswith('java'): - if hasattr(sys, 'setswitchinterval'): - sys.setswitchinterval(interval) - else: - sys.setcheckinterval(interval) + sys.setswitchinterval(interval) def lazy_client_trial(reset, target, test, get_client): @@ -786,7 +925,7 @@ def lazy_client_trial(reset, target, test, get_client): collection = client_context.client.pymongo_test.test with frequent_thread_switches(): - for i in range(NTRIALS): + for _i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test @@ -796,57 +935,51 @@ def lazy_client_trial(reset, target, test, get_client): def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" - # In Python 3.6 importing gevent.socket raises an ImportWarning. 
- with warnings.catch_warnings(): - warnings.simplefilter("ignore", ImportWarning) - try: - import socket - import gevent.socket - return socket.socket is gevent.socket.socket - except ImportError: - return False + try: + import socket + import gevent.socket -def eventlet_monkey_patched(): - """Check if eventlet's monkey patching is active.""" - try: - import threading - import eventlet - return (threading.current_thread.__module__ == - 'eventlet.green.threading') + return socket.socket is gevent.socket.socket except ImportError: return False +def eventlet_monkey_patched(): + """Check if eventlet's monkey patching is active.""" + import threading + + return threading.current_thread.__module__ == "eventlet.green.threading" + + def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() def disable_replication(client): - """Disable replication on all secondaries, requires MongoDB 3.2.""" + """Disable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='alwaysOn') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") def enable_replication(client): - """Enable replication on all secondaries, requires MongoDB 3.2.""" + """Enable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='off') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" + def __init__(self, *args, **kwargs): self.exc = None - super(ExceptionCatchingThread, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def run(self): try: - super(ExceptionCatchingThread, self).run() + super().run() except BaseException as exc: self.exc = exc raise @@ -854,10 +987,169 @@ def run(self): def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get('mode', 'primary') + mode_string = pref.get("mode", "primary") mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get('maxStalenessSeconds', -1) - tag_sets = pref.get('tag_sets') + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tag_sets") return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness) \ No newline at end of file + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) + + +def server_name_to_type(name): + """Convert a ServerType name to the corresponding value. For SDAM tests.""" + # Special case, some tests in the spec include the PossiblePrimary + # type, but only single-threaded drivers need that type. We call + # possible primaries Unknown. 
+ if name == "PossiblePrimary": + return SERVER_TYPE.Unknown + return getattr(SERVER_TYPE, name) + + +def cat_files(dest, *sources): + """Cat multiple files into dest.""" + with open(dest, "wb") as fdst: + for src in sources: + with open(src, "rb") as fsrc: + shutil.copyfileobj(fsrc, fdst) + + +@contextlib.contextmanager +def assertion_context(msg): + """A context manager that adds info to an assertion failure.""" + try: + yield + except AssertionError as exc: + raise AssertionError(f"{msg}: {exc}") + + +def parse_spec_options(opts): + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) + + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + assert isinstance(opts["timeoutMS"], int) + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + + if "maxTimeMS" in opts: + opts["max_time_ms"] = opts.pop("maxTimeMS") + + if "maxCommitTimeMS" in opts: + opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") + + if "hint" in opts: + hint = opts.pop("hint") + if not isinstance(hint, str): + hint = list(hint.items()) + opts["hint"] = hint + + # Properly format 'hint' arguments for the Bulk API tests. + if "requests" in opts: + reqs = opts.pop("requests") + for req in reqs: + if "name" in req: + # CRUD v2 format + args = req.pop("arguments", {}) + if "hint" in args: + hint = args.pop("hint") + if not isinstance(hint, str): + hint = list(hint.items()) + args["hint"] = hint + req["arguments"] = args + else: + # Unified test format + bulk_model, spec = next(iter(req.items())) + if "hint" in spec: + hint = spec.pop("hint") + if not isinstance(hint, str): + hint = list(hint.items()) + spec["hint"] = hint + opts["requests"] = reqs + + return dict(opts) + + +def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + # PyMongo accepts sort as list of tuples. + if arg_name == "sort": + sort_dict = arguments[arg_name] + arguments[arg_name] = list(sort_dict.items()) + # Named "key" instead not fieldName. + if arg_name == "fieldName": + arguments["key"] = arguments.pop(arg_name) + # Aggregate uses "batchSize", while find uses batch_size. + elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": + continue + elif arg_name == "timeoutMode": + raise unittest.SkipTest("PyMongo does not support timeoutMode") + # Requires boolean returnDocument. + elif arg_name == "returnDocument": + arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) + elif c2s == "requests": + # Parse each request into a bulk write model. 
+ requests = [] + for request in arguments["requests"]: + if "name" in request: + # CRUD v2 format + bulk_model = camel_to_upper_camel(request["name"]) + bulk_class = getattr(operations, bulk_model) + bulk_arguments = camel_to_snake_args(request["arguments"]) + else: + # Unified test format + bulk_model, spec = next(iter(request.items())) + bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) + bulk_arguments = camel_to_snake_args(spec) + requests.append(bulk_class(**dict(bulk_arguments))) + arguments["requests"] = requests + elif arg_name == "session": + arguments["session"] = entity_map[arguments["session"]] + elif opname == "open_download_stream" and arg_name == "id": + arguments["file_id"] = arguments.pop(arg_name) + elif opname not in ("find", "find_one") and c2s == "max_time_ms": + # find is the only method that accepts snake_case max_time_ms. + # All other methods take kwargs which must use the server's + # camelCase maxTimeMS. See PYTHON-1855. + arguments["maxTimeMS"] = arguments.pop("max_time_ms") + elif opname == "with_transaction" and arg_name == "callback": + if "operations" in arguments[arg_name]: + # CRUD v2 format + callback_ops = arguments[arg_name]["operations"] + else: + # Unified test format + callback_ops = arguments[arg_name] + arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) + elif opname == "drop_collection" and arg_name == "collection": + arguments["name_or_collection"] = arguments.pop(arg_name) + elif opname == "create_collection": + if arg_name == "collection": + arguments["name"] = arguments.pop(arg_name) + arguments["check_exists"] = False + # Any other arguments to create_collection are passed through + # **kwargs. + elif opname == "create_index" and arg_name == "keys": + arguments["keys"] = list(arguments.pop(arg_name).items()) + elif opname == "drop_index" and arg_name == "name": + arguments["index_or_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "to": + arguments["new_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "dropTarget": + arguments["dropTarget"] = arguments.pop(arg_name) + elif arg_name == "cursorType": + cursor_type = arguments.pop(arg_name) + if cursor_type == "tailable": + arguments["cursor_type"] = CursorType.TAILABLE + elif cursor_type == "tailableAwait": + # tailableAwait maps to the tailable + awaitData cursor type. + arguments["cursor_type"] = CursorType.TAILABLE_AWAIT + else: + raise AssertionError(f"Unsupported cursorType: {cursor_type}") + else: + arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index ad17166807..7952a2862d 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -13,6 +13,7 @@ # limitations under the License.
"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations import datetime import os @@ -20,50 +21,37 @@ sys.path[0:0] = [""] +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, parse_read_preference + from bson import json_util -from pymongo.common import clean_node, HEARTBEAT_FREQUENCY +from pymongo.common import HEARTBEAT_FREQUENCY, clean_node from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription -from pymongo.settings import TopologySettings from pymongo.server_selectors import writable_server_selector +from pymongo.settings import TopologySettings from pymongo.topology import Topology -from test import unittest -from test.utils import MockPool, parse_read_preference - - -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - pass - - def open(self): - pass - - def request_check(self): - pass - - def close(self): - pass def get_addresses(server_list): seeds = [] hosts = [] for server in server_list: - seeds.append(clean_node(server['address'])) - hosts.append(server['address']) + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) return seeds, hosts def make_last_write_date(server): - epoch = datetime.datetime.utcfromtimestamp(0) - millis = server.get('lastWrite', {}).get('lastWriteDate') + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) + millis = server.get("lastWrite", {}).get("lastWriteDate") if millis: diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) / 1000 micros = diff * 1000 - return epoch + datetime.timedelta( - seconds=seconds, microseconds=micros) + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) else: # "Unknown" server. 
return epoch @@ -71,104 +59,122 @@ def make_last_write_date(server): def make_server_description(server, hosts): """Make a ServerDescription from server info in a JSON test.""" - server_type = server['type'] - if server_type == "Unknown": - return ServerDescription(clean_node(server['address']), IsMaster({})) + server_type = server["type"] + if server_type in ("Unknown", "PossiblePrimary"): + return ServerDescription(clean_node(server["address"]), Hello({})) - ismaster_response = {'ok': True, 'hosts': hosts} - if server_type != "Standalone" and server_type != "Mongos": - ismaster_response['setName'] = "rs" + hello_response = {"ok": True, "hosts": hosts} + if server_type not in ("Standalone", "Mongos", "RSGhost"): + hello_response["setName"] = "rs" if server_type == "RSPrimary": - ismaster_response['ismaster'] = True + hello_response[HelloCompat.LEGACY_CMD] = True elif server_type == "RSSecondary": - ismaster_response['secondary'] = True + hello_response["secondary"] = True elif server_type == "Mongos": - ismaster_response['msg'] = 'isdbgrid' + hello_response["msg"] = "isdbgrid" + elif server_type == "RSGhost": + hello_response["isreplicaset"] = True + elif server_type == "RSArbiter": + hello_response["arbiterOnly"] = True - ismaster_response['lastWrite'] = { - 'lastWriteDate': make_last_write_date(server) - } + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} - for field in 'maxWireVersion', 'tags', 'idleWritePeriodMillis': + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": if field in server: - ismaster_response[field] = server[field] + hello_response[field] = server[field] - ismaster_response.setdefault('maxWireVersion', 6) + hello_response.setdefault("maxWireVersion", 6) # Sets _last_update_time to now. - sd = ServerDescription(clean_node(server['address']), - IsMaster(ismaster_response), - round_trip_time=server['avg_rtt_ms'] / 1000.0) + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) - if 'lastUpdateTime' in server: - sd._last_update_time = server['lastUpdateTime'] / 1000.0 # ms to sec. + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. return sd def get_topology_type_name(scenario_def): - td = scenario_def['topology_description'] - name = td['type'] - if name == 'Unknown': + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": # PyMongo never starts a topology in type Unknown. - return 'Sharded' if len(td['servers']) > 1 else 'Single' + return "Sharded" if len(td["servers"]) > 1 else "Single" else: return name def get_topology_settings_dict(**kwargs): - settings = dict( - monitor_class=MockMonitor, - heartbeat_frequency=HEARTBEAT_FREQUENCY, - pool_class=MockPool - ) + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": MockPool, + } settings.update(kwargs) return settings -def create_test(scenario_def): - def run_scenario(self): - # Initialize topologies. - if 'heartbeatFrequencyMS' in scenario_def: - frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0 - else: - frequency = HEARTBEAT_FREQUENCY +def create_topology(scenario_def, **kwargs): + # Initialize topologies. 
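+    # heartbeatFrequencyMS comes from the scenario file in milliseconds;
+    # TopologySettings expects seconds, hence the division by 1000.0.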
+ if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY + + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) - seeds, hosts = get_addresses( - scenario_def['topology_description']['servers']) + topology_type = get_topology_type_name(scenario_def) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) + # Force topology description to ReplicaSet + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) - settings = get_topology_settings_dict( - heartbeat_frequency=frequency, - seeds=seeds - ) + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + topology.open() + # Update topologies with server descriptions. + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + topology.on_change(server_description) + + # Assert that descriptions match + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name + + return topology + + +def create_test(scenario_def): + def run_scenario(self): + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode # and tag sets. - top_latency = Topology(TopologySettings(**settings)) - top_latency.open() + top_latency = create_topology(scenario_def) # "In latency window" is defined in the server selection # spec as the subset of suitable_servers that falls within the # allowable latency window. - settings['local_threshold_ms'] = 1000000 - top_suitable = Topology(TopologySettings(**settings)) - top_suitable.open() - - # Update topologies with server descriptions. - for server in scenario_def['topology_description']['servers']: - server_description = make_server_description(server, hosts) - top_suitable.on_change(server_description) - top_latency.on_change(server_description) + top_suitable = create_topology(scenario_def, local_threshold_ms=1000000) # Create server selector. if scenario_def.get("operation") == "write": pref = writable_server_selector else: # Make first letter lowercase to match read_pref's modes. - pref_def = scenario_def['read_preference'] - if scenario_def.get('error'): + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): with self.assertRaises((ConfigurationError, ValueError)): # Error can be raised when making Read Pref or selecting. pref = parse_read_preference(pref_def) @@ -178,35 +184,33 @@ def run_scenario(self): pref = parse_read_preference(pref_def) # Select servers. 
- if not scenario_def.get('suitable_servers'): + if not scenario_def.get("suitable_servers"): with self.assertRaises(AutoReconnect): top_suitable.select_server(pref, server_selection_timeout=0) return - if not scenario_def['in_latency_window']: + if not scenario_def["in_latency_window"]: with self.assertRaises(AutoReconnect): top_latency.select_server(pref, server_selection_timeout=0) return - actual_suitable_s = top_suitable.select_servers( - pref, server_selection_timeout=0) - actual_latency_s = top_latency.select_servers( - pref, server_selection_timeout=0) + actual_suitable_s = top_suitable.select_servers(pref, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, server_selection_timeout=0) expected_suitable_servers = {} - for server in scenario_def['suitable_servers']: + for server in scenario_def["suitable_servers"]: server_description = make_server_description(server, hosts) - expected_suitable_servers[server['address']] = server_description + expected_suitable_servers[server["address"]] = server_description actual_suitable_servers = {} for s in actual_suitable_s: - actual_suitable_servers["%s:%d" % (s.description.address[0], - s.description.address[1])] = s.description + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_suitable_servers), - len(expected_suitable_servers)) + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) for k, actual in actual_suitable_servers.items(): expected = expected_suitable_servers[k] self.assertEqual(expected.address, actual.address) @@ -216,18 +220,17 @@ def run_scenario(self): self.assertEqual(expected.all_hosts, actual.all_hosts) expected_latency_servers = {} - for server in scenario_def['in_latency_window']: + for server in scenario_def["in_latency_window"]: server_description = make_server_description(server, hosts) - expected_latency_servers[server['address']] = server_description + expected_latency_servers[server["address"]] = server_description actual_latency_servers = {} for s in actual_latency_s: - actual_latency_servers["%s:%d" % - (s.description.address[0], - s.description.address[1])] = s.description + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_latency_servers), - len(expected_latency_servers)) + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) for k, actual in actual_latency_servers.items(): expected = expected_latency_servers[k] self.assertEqual(expected.address, actual.address) @@ -245,16 +248,17 @@ class TestAllScenarios(unittest.TestCase): for dirpath, _, filenames in os.walk(test_dir): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) # Construct test from scenario. 
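            # Each JSON scenario file becomes one synthetic test method,
            # named test_<dirname>_<filename>, attached to TestAllScenarios.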
new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index a200b41f99..eea96aa1d7 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -13,74 +13,111 @@ # limitations under the License. """Utilities for testing driver specs.""" - -import copy -import sys - - -from bson import decode, encode -from bson.binary import Binary, STANDARD -from bson.codec_options import CodecOptions +from __future__ import annotations + +import functools +import threading +from collections import abc +from test import IntegrationTest, client_context, client_knobs +from test.utils import ( + CMAPListener, + CompareType, + EventListener, + OvertCommandListener, + ServerAndTopologyEventListener, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + rs_client, +) +from typing import List + +from bson import ObjectId, decode, encode +from bson.binary import Binary from bson.int64 import Int64 -from bson.py3compat import iteritems, abc, text_type from bson.son import SON - from gridfs import GridFSBucket - -from pymongo import (client_session, - operations) +from pymongo import client_session from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor -from pymongo.errors import (OperationFailure, PyMongoError) +from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import _WriteResult, BulkWriteResult +from pymongo.results import BulkWriteResult, _WriteResult from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - unittest) -from test.utils import (camel_to_snake, - camel_to_snake_args, - camel_to_upper_camel, - CompareType, - OvertCommandListener, - rs_client, parse_read_preference) + +class SpecRunnerThread(threading.Thread): + def __init__(self, name): + super().__init__() + self.name = name + self.exc = None + self.daemon = True + self.cond = threading.Condition() + self.ops = [] + self.stopped = False + + def schedule(self, work): + self.ops.append(work) + with self.cond: + self.cond.notify() + + def stop(self): + self.stopped = True + with self.cond: + self.cond.notify() + + def run(self): + while not self.stopped or self.ops: + if not self.ops: + with self.cond: + self.cond.wait(10) + if self.ops: + try: + work = self.ops.pop(0) + work() + except Exception as exc: + self.exc = exc + self.stop() class SpecRunner(IntegrationTest): + mongos_clients: List + knobs: client_knobs + listener: EventListener @classmethod def setUpClass(cls): - super(SpecRunner, cls).setUpClass() + super().setUpClass() cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. 
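        # The default heartbeat frequency is 10 seconds; 0.1 makes the
        # monitors re-check servers almost continuously so topology
        # changes are picked up quickly.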
- cls.knobs = client_knobs(min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @classmethod def tearDownClass(cls): cls.knobs.disable() - super(SpecRunner, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(SpecRunner, self).setUp() - self.listener = None + super().setUp() + self.targets = {} + self.listener = None # type: ignore + self.pool_listener = None + self.server_listener = None self.maxDiff = None def _set_fail_point(self, client, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) + cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) def set_fail_point(self, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) - cmd.update(command_args) clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - self._set_fail_point(client, cmd) + self._set_fail_point(client, command_args) def targeted_fail_point(self, session, fail_point): """Run the targetedFailPoint test operation. @@ -90,7 +127,7 @@ def targeted_fail_point(self, session, fail_point): clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] self._set_fail_point(client, fail_point) - self.addCleanup(self.set_fail_point, {'mode': 'off'}) + self.addCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): """Run the assertSessionPinned test operation. @@ -107,6 +144,26 @@ def assert_session_unpinned(self, session): self.assertIsNone(session._pinned_address) self.assertIsNone(session._transaction.pinned_address) + def assert_collection_exists(self, database, collection): + """Run the assertCollectionExists test operation.""" + db = self.client[database] + self.assertIn(collection, db.list_collection_names()) + + def assert_collection_not_exists(self, database, collection): + """Run the assertCollectionNotExists test operation.""" + db = self.client[database] + self.assertNotIn(collection, db.list_collection_names()) + + def assert_index_exists(self, database, collection, index): + """Run the assertIndexExists test operation.""" + coll = self.client[database][collection] + self.assertIn(index, [doc["name"] for doc in coll.list_indexes()]) + + def assert_index_not_exists(self, database, collection, index): + """Run the assertIndexNotExists test operation.""" + coll = self.client[database][collection] + self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) + def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] self.assertEqual(labels, expected_labels) @@ -114,14 +171,14 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), - msg='error labels should not contain %s' % (label,)) + exc.has_error_label(label), msg=f"error labels should not contain {label}" + ) def kill_all_sessions(self): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: try: - client.admin.command('killAllSessions', []) + client.admin.command("killAllSessions", []) except OperationFailure: # "operation was interrupted" by killing the command's # own session. @@ -143,8 +200,7 @@ def check_result(self, expected_result, result): for res in expected_result: prop = camel_to_snake(res) # SPEC-869: Only BulkWriteResult has upserted_count. 
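            # For non-bulk results, derive it from upserted_id: one
            # upsert occurred if upserted_id is not None, otherwise zero.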
- if (prop == "upserted_count" - and not isinstance(result, BulkWriteResult)): + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: upserted_count = 1 else: @@ -153,14 +209,14 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), - result.inserted_count) + self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. ids = expected_result[res] if isinstance(ids, dict): ids = [ids[str(i)] for i in range(len(ids))] + self.assertEqual(ids, result.inserted_ids, prop) elif prop == "upserted_ids": # Convert indexes from strings to integers. @@ -170,142 +226,102 @@ def check_result(self, expected_result, result): expected_ids[int(str_index)] = ids[str_index] self.assertEqual(expected_ids, result.upserted_ids, prop) else: - self.assertEqual( - getattr(result, prop), expected_result[res], prop) + self.assertEqual(getattr(result, prop), expected_result[res], prop) return True else: - self.assertEqual(result, expected_result) + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) + return None def get_object_name(self, op): """Allow subclasses to override handling of 'object' Transaction spec says 'object' is required. """ - return op['object'] + return op["object"] @staticmethod def parse_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) - - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) - - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) - - if 'maxTimeMS' in opts: - opts['max_time_ms'] = opts.pop('maxTimeMS') - - if 'maxCommitTimeMS' in opts: - opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') - - return dict(opts) + return parse_spec_options(opts) def run_operation(self, sessions, collection, operation): original_collection = collection - name = camel_to_snake(operation['name']) - if name == 'run_command': - name = 'command' - elif name == 'download_by_name': - name = 'open_download_stream_by_name' - elif name == 'download': - name = 'open_download_stream' + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") database = collection.database collection = database.get_collection(collection.name) - if 'collectionOptions' in operation: + if "collectionOptions" in operation: collection = collection.with_options( - **self.parse_options(operation['collectionOptions'])) + **self.parse_options(operation["collectionOptions"]) + ) object_name = self.get_object_name(operation) - if object_name == 'gridfsbucket': + if object_name == "gridfsbucket": # Only create the 
GridFSBucket when we need it (for the gridfs # retryable reads tests). - obj = GridFSBucket( - database, bucket_name=collection.name, - disable_md5=True) + obj = GridFSBucket(database, bucket_name=collection.name) else: objects = { - 'client': database.client, - 'database': database, - 'collection': collection, - 'testRunner': self + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, } objects.update(sessions) obj = objects[object_name] # Combine arguments with options and handle special cases. - arguments = operation.get('arguments', {}) + arguments = operation.get("arguments", {}) arguments.update(arguments.pop("options", {})) self.parse_options(arguments) cmd = getattr(obj, name) - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - # PyMongo accepts sort as list of tuples. - if arg_name == "sort": - sort_dict = arguments[arg_name] - arguments[arg_name] = list(iteritems(sort_dict)) - # Named "key" instead not fieldName. - if arg_name == "fieldName": - arguments["key"] = arguments.pop(arg_name) - # Aggregate uses "batchSize", while find uses batch_size. - elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and - name == "aggregate"): - continue - # Requires boolean returnDocument. - elif arg_name == "returnDocument": - arguments[c2s] = arguments.pop(arg_name) == "After" - elif c2s == "requests": - # Parse each request into a bulk write model. - requests = [] - for request in arguments["requests"]: - bulk_model = camel_to_upper_camel(request["name"]) - bulk_class = getattr(operations, bulk_model) - bulk_arguments = camel_to_snake_args(request["arguments"]) - requests.append(bulk_class(**dict(bulk_arguments))) - arguments["requests"] = requests - elif arg_name == "session": - arguments['session'] = sessions[arguments['session']] - elif name == 'command' and arg_name == 'command': - # Ensure the first key is the command name. - ordered_command = SON([(operation['command_name'], 1)]) - ordered_command.update(arguments['command']) - arguments['command'] = ordered_command - elif name == 'open_download_stream' and arg_name == 'id': - arguments['file_id'] = arguments.pop(arg_name) - elif name != 'find' and c2s == 'max_time_ms': - # find is the only method that accepts snake_case max_time_ms. - # All other methods take kwargs which must use the server's - # camelCase maxTimeMS. See PYTHON-1855. - arguments['maxTimeMS'] = arguments.pop('max_time_ms') - elif name == 'with_transaction' and arg_name == 'callback': - callback_ops = arguments[arg_name]['operations'] - arguments['callback'] = lambda _: self.run_operations( - sessions, original_collection, copy.deepcopy(callback_ops), - in_with_transaction=True) - else: - arguments[c2s] = arguments.pop(arg_name) + with_txn_callback = functools.partial( + self.run_operations, sessions, original_collection, in_with_transaction=True + ) + prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback) + + if name == "run_on_thread": + args = {"sessions": sessions, "collection": collection} + args.update(arguments) + arguments = args result = cmd(**dict(arguments)) + # Cleanup open change stream cursors. + if name == "watch": + self.addCleanup(result.close) if name == "aggregate": if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: # Read from the primary to ensure causal consistency. 
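                # $out writes the aggregation's results to the named
                # collection, so return a cursor over that collection for
                # the caller to validate.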
out = collection.database.get_collection( - arguments["pipeline"][-1]["$out"], - read_preference=ReadPreference.PRIMARY) + arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY + ) return out.find() - if name == "map_reduce": - if isinstance(result, dict) and 'results' in result: - return result['results'] - if 'download' in name: + if "download" in name: result = Binary(result.read()) if isinstance(result, Cursor) or isinstance(result, CommandCursor): @@ -317,90 +333,91 @@ def allowable_errors(self, op): """Allow encryption spec to override expected error classes.""" return (PyMongoError,) - def run_operations(self, sessions, collection, ops, - in_with_transaction=False): + def _run_op(self, sessions, collection, op, in_with_transaction): + expected_result = op.get("result") + if expect_error(op): + with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: + self.run_operation(sessions, collection, op.copy()) + exc = context.exception + if expect_error_message(expected_result): + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() + else: + errmsg = str(exc).lower() + self.assertIn(expected_result["errorContains"].lower(), errmsg) + if expect_error_code(expected_result): + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) + if expect_error_labels_contain(expected_result): + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) + if expect_error_labels_omit(expected_result): + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc + + # Reraise the exception if we're in the with_transaction + # callback. + if in_with_transaction: + raise context.exception + else: + result = self.run_operation(sessions, collection, op.copy()) + if "result" in op: + if op["name"] == "runCommand": + self.check_command_result(expected_result, result) + else: + self.check_result(expected_result, result) + + def run_operations(self, sessions, collection, ops, in_with_transaction=False): for op in ops: - expected_result = op.get('result') - if expect_error(op): - with self.assertRaises(self.allowable_errors(op), - msg=op['name']) as context: - self.run_operation(sessions, collection, op.copy()) - - if expect_error_message(expected_result): - self.assertIn(expected_result['errorContains'].lower(), - str(context.exception).lower()) - if expect_error_code(expected_result): - self.assertEqual(expected_result['errorCodeName'], - context.exception.details.get('codeName')) - if expect_error_labels_contain(expected_result): - self.assertErrorLabelsContain( - context.exception, - expected_result['errorLabelsContain']) - if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit( - context.exception, - expected_result['errorLabelsOmit']) - - # Reraise the exception if we're in the with_transaction - # callback. 
- if in_with_transaction: - raise context.exception - else: - result = self.run_operation(sessions, collection, op.copy()) - if 'result' in op: - if op['name'] == 'runCommand': - self.check_command_result(expected_result, result) - else: - self.check_result(expected_result, result) + self._run_op(sessions, collection, op, in_with_transaction) # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): - res = listener.results - if not len(test['expectations']): + events = listener.started_events + if not len(test["expectations"]): return # Give a nicer message when there are missing or extra events - cmds = decode_raw([event.command for event in res['started']]) - self.assertEqual( - len(res['started']), len(test['expectations']), cmds) - for i, expectation in enumerate(test['expectations']): + cmds = decode_raw([event.command for event in events]) + self.assertEqual(len(events), len(test["expectations"]), cmds) + for i, expectation in enumerate(test["expectations"]): event_type = next(iter(expectation)) - event = res['started'][i] + event = events[i] # The tests substitute 42 for any number other than 0. - if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = Int64(42) - elif event.command_name == 'killCursors': - event.command['cursors'] = [Int64(42)] - elif event.command_name == 'update': + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = Int64(42) + elif event.command_name == "killCursors": + event.command["cursors"] = [Int64(42)] + elif event.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. - updates = expectation[event_type]['command']['updates'] + updates = expectation[event_type]["command"]["updates"] for update in updates: - update.setdefault('upsert', False) - update.setdefault('multi', False) + update.setdefault("upsert", False) + update.setdefault("multi", False) # Replace afterClusterTime: 42 with actual afterClusterTime. - expected_cmd = expectation[event_type]['command'] - expected_read_concern = expected_cmd.get('readConcern') + expected_cmd = expectation[event_type]["command"] + expected_read_concern = expected_cmd.get("readConcern") if expected_read_concern is not None: - time = expected_read_concern.get('afterClusterTime') + time = expected_read_concern.get("afterClusterTime") if time == 42: - actual_time = event.command.get( - 'readConcern', {}).get('afterClusterTime') + actual_time = event.command.get("readConcern", {}).get("afterClusterTime") if actual_time is not None: - expected_read_concern['afterClusterTime'] = actual_time + expected_read_concern["afterClusterTime"] = actual_time - recovery_token = expected_cmd.get('recoveryToken') + recovery_token = expected_cmd.get("recoveryToken") if recovery_token == 42: - expected_cmd['recoveryToken'] = CompareType(dict) + expected_cmd["recoveryToken"] = CompareType(dict) # Replace lsid with a name like "session0" to match test. 
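            # The actual lsid value is opaque; the expectations only care
            # which named session (e.g. session0) issued each command.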
- if 'lsid' in event.command: + if "lsid" in event.command: for name, lsid in session_ids.items(): - if event.command['lsid'] == lsid: - event.command['lsid'] = name + if event.command["lsid"] == lsid: + event.command["lsid"] = name break for attr, expected in expectation[event_type].items(): @@ -410,28 +427,27 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % ( - key, actual)) + self.fail(f"Unexpected key [{key}] in {actual!r}") elif key not in actual: - self.fail("Expected key [%s] in %r" % ( - key, actual)) + self.fail(f"Expected key [{key}] in {actual!r}") else: - self.assertEqual(val, decode_raw(actual[key]), - "Key [%s] in %s" % (key, actual)) + self.assertEqual( + val, decode_raw(actual[key]), f"Key [{key}] in {actual}" + ) else: self.assertEqual(actual, expected) def maybe_skip_scenario(self, test): - if test.get('skipReason'): - raise unittest.SkipTest(test.get('skipReason')) + if test.get("skipReason"): + self.skipTest(test.get("skipReason")) def get_scenario_db_name(self, scenario_def): """Allow subclasses to override a test's database name.""" - return scenario_def['database_name'] + return scenario_def["database_name"] def get_scenario_coll_name(self, scenario_def): """Allow subclasses to override a test's collection name.""" - return scenario_def['collection_name'] + return scenario_def["collection_name"] def get_outcome_coll_name(self, outcome, collection): """Allow subclasses to override outcome collection.""" @@ -439,8 +455,9 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's - operation.""" - self.run_operations(sessions, collection, test['operations']) + operation. + """ + self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): """Allow encryption spec to override a clientOptions parsing.""" @@ -452,59 +469,83 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) - coll = db[coll_name] - coll.drop() - db.create_collection(coll_name) - if scenario_def['data']: - # Load data. - coll.insert_many(scenario_def['data']) + documents = scenario_def["data"] + + # Setup the collection with as few majority writes as possible. + db = client_context.client.get_database(db_name) + coll_exists = bool(db.list_collection_names(filter={"name": coll_name})) + if coll_exists: + db[coll_name].delete_many({}) + # Only use majority wc only on the final write. + wc = WriteConcern(w="majority") + if documents: + db.get_collection(coll_name, write_concern=wc).insert_many(documents) + elif not coll_exists: + # Ensure collection exists. + db.create_collection(coll_name, write_concern=wc) def run_scenario(self, scenario_def, test): self.maybe_skip_scenario(test) - listener = OvertCommandListener() - # Create a new client, to avoid interference from pooled sessions. - client_options = self.parse_client_options(test['clientOptions']) - # MMAPv1 does not support retryable writes. 
- if (client_options.get('retryWrites') is True and - client_context.storage_engine == 'mmapv1'): - self.skipTest("MMAPv1 does not support retryWrites=True") - use_multi_mongos = test['useMultipleMongoses'] - if client_context.is_mongos and use_multi_mongos: - client = rs_client(client_context.mongos_seeds(), - event_listeners=[listener], **client_options) - else: - client = rs_client(event_listeners=[listener], **client_options) - self.listener = listener - # Close the client explicitly to avoid having too many threads open. - self.addCleanup(client.close) # Kill all sessions before and after each test to prevent an open # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. self.kill_all_sessions() self.addCleanup(self.kill_all_sessions) - + self.setup_scenario(scenario_def) database_name = self.get_scenario_db_name(scenario_def) collection_name = self.get_scenario_coll_name(scenario_def) - self.setup_scenario(scenario_def) - # SPEC-1245 workaround StaleDbVersion on distinct for c in self.mongos_clients: c[database_name][collection_name].distinct("x") + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point(fp) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + listener = OvertCommandListener() + pool_listener = CMAPListener() + server_listener = ServerAndTopologyEventListener() + # Create a new client, to avoid interference from pooled sessions. + client_options = self.parse_client_options(test["clientOptions"]) + # MMAPv1 does not support retryable writes. + if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support retryWrites=True") + use_multi_mongos = test["useMultipleMongoses"] + host = None + if use_multi_mongos: + if client_context.load_balancer or client_context.serverless: + host = client_context.MULTI_MONGOS_LB_URI + elif client_context.is_mongos: + host = client_context.mongos_seeds() + client = rs_client( + h=host, event_listeners=[listener, pool_listener, server_listener], **client_options + ) + self.scenario_client = client + self.listener = listener + self.pool_listener = pool_listener + self.server_listener = server_listener + # Close the client explicitly to avoid having too many threads open. + self.addCleanup(client.close) + # Create session0 and session1. sessions = {} session_ids = {} for i in range(2): - session_name = 'session%d' % i - opts = camel_to_snake_args(test['sessionOptions'][session_name]) - if 'default_transaction_options' in opts: - txn_opts = self.parse_options( - opts['default_transaction_options']) + # Don't attempt to create sessions if they are not supported by + # the running server version. 
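+            # Sessions were introduced in MongoDB 3.6; older servers do
+            # not advertise logicalSessionTimeoutMinutes.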
+ if not client_context.sessions_enabled: + break + session_name = "session%d" % i + opts = camel_to_snake_args(test["sessionOptions"][session_name]) + if "default_transaction_options" in opts: + txn_opts = self.parse_options(opts["default_transaction_options"]) txn_opts = client_session.TransactionOptions(**txn_opts) - opts['default_transaction_options'] = txn_opts + opts["default_transaction_options"] = txn_opts s = client.start_session(**dict(opts)) @@ -514,14 +555,6 @@ def run_scenario(self, scenario_def, test): self.addCleanup(end_sessions, sessions) - if 'failPoint' in test: - fp = test['failPoint'] - self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) - - listener.results.clear() - collection = client[database_name][collection_name] self.run_test_ops(sessions, collection, test) @@ -530,74 +563,82 @@ def run_scenario(self, scenario_def, test): self.check_events(test, listener, session_ids) # Disable fail points. - if 'failPoint' in test: - fp = test['failPoint'] - self.set_fail_point({ - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point({"configureFailPoint": fp["configureFailPoint"], "mode": "off"}) # Assert final state is expected. - outcome = test['outcome'] - expected_c = outcome.get('collection') + outcome = test["outcome"] + expected_c = outcome.get("collection") if expected_c is not None: - outcome_coll_name = self.get_outcome_coll_name( - outcome, collection) + outcome_coll_name = self.get_outcome_coll_name(outcome, collection) # Read from the primary with local read concern to ensure causal # consistency. - outcome_coll = client_context.client[ - collection.database.name].get_collection( + outcome_coll = client_context.client[collection.database.name].get_collection( outcome_coll_name, read_preference=ReadPreference.PRIMARY, - read_concern=ReadConcern('local')) + read_concern=ReadConcern("local"), + ) + actual_data = list(outcome_coll.find(sort=[("_id", 1)])) # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. 
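            # CompareType.__eq__ accepts any instance of the wrapped type,
            # and assertEqual evaluates first == second, so the CompareType
            # wrappers must drive the comparison from the left.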
- self.assertEqual( - wrap_types(expected_c['data']), list(outcome_coll.find())) + self.assertEqual(wrap_types(expected_c["data"]), actual_data) def expect_any_error(op): if isinstance(op, dict): - return op.get('error') + return op.get("error") return False def expect_error_message(expected_result): if isinstance(expected_result, dict): - return isinstance(expected_result['errorContains'], text_type) + return isinstance(expected_result["errorContains"], str) return False def expect_error_code(expected_result): if isinstance(expected_result, dict): - return expected_result['errorCodeName'] + return expected_result["errorCodeName"] return False def expect_error_labels_contain(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsContain'] + return expected_result["errorLabelsContain"] return False def expect_error_labels_omit(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsOmit'] + return expected_result["errorLabelsOmit"] + + return False + + +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] return False def expect_error(op): - expected_result = op.get('result') - return (expect_any_error(op) or - expect_error_message(expected_result) - or expect_error_code(expected_result) - or expect_error_labels_contain(expected_result) - or expect_error_labels_omit(expected_result)) + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) + ) def end_sessions(sessions): @@ -606,19 +647,21 @@ def end_sessions(sessions): s.end_session() -OPTS = CodecOptions(document_class=dict, uuid_representation=STANDARD) - - def decode_raw(val): """Decode RawBSONDocuments in the given container.""" if isinstance(val, (list, abc.Mapping)): - return decode(encode({'v': val}, codec_options=OPTS), OPTS)['v'] + return decode(encode({"v": val}))["v"] return val TYPES = { - 'binData': Binary, - 'long': Int64, + "binData": Binary, + "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, } @@ -627,9 +670,13 @@ def wrap_types(val): if isinstance(val, list): return [wrap_types(v) for v in val] if isinstance(val, abc.Mapping): - typ = val.get('$$type') + typ = val.get("$$type") if typ: - return CompareType(TYPES[typ]) + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) d = {} for key in val: d[key] = wrap_types(val[key]) diff --git a/test/version.py b/test/version.py index 3348060bfc..043c760cf5 100644 --- a/test/version.py +++ b/test/version.py @@ -13,13 +13,13 @@ # limitations under the License. 
"""Some tools for running tests based on MongoDB server version.""" +from __future__ import annotations class Version(tuple): - def __new__(cls, *version): padded_version = cls._padded(version, 4) - return super(Version, cls).__new__(cls, tuple(padded_version)) + return super().__new__(cls, tuple(padded_version)) @classmethod def _padded(cls, iter, length, padding=0): @@ -43,16 +43,15 @@ def from_string(cls, version_string): version_string = version_string[0:-1] mod = -1 # Deal with '-rcX' substrings - if '-rc' in version_string: - version_string = version_string[0:version_string.find('-rc')] + if "-rc" in version_string: + version_string = version_string[0 : version_string.find("-rc")] mod = -1 # Deal with git describe generated substrings - elif '-' in version_string: - version_string = version_string[0:version_string.find('-')] + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] mod = -1 bump_patch_level = True - version = [int(part) for part in version_string.split(".")] version = cls._padded(version, 3) # Make from_string and from_version_array agree. For example: @@ -77,9 +76,9 @@ def from_version_array(cls, version_array): @classmethod def from_client(cls, client): info = client.server_info() - if 'versionArray' in info: - return cls.from_version_array(info['versionArray']) - return cls.from_string(info['version']) + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) def at_least(self, *other_version): return self >= Version(*other_version) diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json new file mode 100644 index 0000000000..c1c8ecce01 --- /dev/null +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -0,0 +1,1109 @@ +{ + "description": "CRUD Api Version 1 (strict)", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } 
+ } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + 
"filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isError": true, + "errorContains": "command distinct is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "find and getMore append API version", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + 
"$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateMany appends declared API 
version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json new file mode 100644 index 0000000000..a387d0587e --- /dev/null +++ b/test/versioned-api/crud-api-version-1.json @@ -0,0 +1,1101 @@ +{ + "description": "CRUD Api Version 1", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "deprecationErrors": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": 
{} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + 
"$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "find and getMore append API version", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": 
false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateMany appends declared API version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + 
"apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/runcommand-helper-no-api-version-declared.json b/test/versioned-api/runcommand-helper-no-api-version-declared.json new file mode 100644 index 0000000000..17e0126d10 --- /dev/null +++ b/test/versioned-api/runcommand-helper-no-api-version-declared.json @@ -0,0 +1,127 @@ +{ + "description": "RunCommand helper: No API version declared", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + "requireApiVersion": false + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "runCommand does not inspect or change the command document", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version" + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + "databaseName": "versioned-api-tests" + } + } + ] + } + ] + }, + { + "description": "runCommand does not prevent sending invalid API version declarations", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiStrict": true + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": { + "$$exists": false + }, + "apiStrict": true, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + "databaseName": "versioned-api-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-deprecation-errors.json b/test/versioned-api/test-commands-deprecation-errors.json new file mode 100644 index 0000000000..0668df830a --- /dev/null +++ b/test/versioned-api/test-commands-deprecation-errors.json @@ -0,0 +1,74 @@ +{ + "description": "Test commands: deprecation errors", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + 
"enableTestCommands": true, + "acceptApiVersion2": true, + "requireApiVersion": false + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is deprecated raises a deprecation error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testDeprecationInVersion2", + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiDeprecationErrors": true + } + }, + "expectError": { + "isError": true, + "errorContains": "command testDeprecationInVersion2 is deprecated in API Version 2", + "errorCodeName": "APIDeprecationError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-strict-mode.json b/test/versioned-api/test-commands-strict-mode.json new file mode 100644 index 0000000000..9c4ebea785 --- /dev/null +++ b/test/versioned-api/test-commands-strict-mode.json @@ -0,0 +1,75 @@ +{ + "description": "Test commands: strict mode", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + "enableTestCommands": true + }, + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is not part of the versioned API results in an error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testVersion2", + "command": { + "testVersion2": 1 + } + }, + "expectError": { + "isError": true, + "errorContains": "command testVersion2 is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testVersion2": 1, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json new file mode 100644 index 0000000000..c00c5240ae --- /dev/null +++ b/test/versioned-api/transaction-handling.json @@ -0,0 +1,348 @@ +{ + "description": "Transaction handling", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + 
"apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "All commands in a transaction declare an API version", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction includes an API version", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + 
"lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/tools/benchmark.py b/tools/benchmark.py deleted file mode 100644 index a0fc74a121..0000000000 --- a/tools/benchmark.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2009-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MongoDB benchmarking suite.""" -from __future__ import print_function - -import time -import sys -sys.path[0:0] = [""] - -import datetime - -from pymongo import mongo_client -from pymongo import ASCENDING - -trials = 2 -per_trial = 5000 -batch_size = 100 -small = {} -medium = {"integer": 5, - "number": 5.05, - "boolean": False, - "array": ["test", "benchmark"] - } -# this is similar to the benchmark data posted to the user list -large = {"base_url": "http://www.example.com/test-me", - "total_word_count": 6743, - "access_time": datetime.datetime.utcnow(), - "meta_tags": {"description": "i am a long description string", - "author": "Holly Man", - "dynamically_created_meta_tag": "who know\n what" - }, - "page_structure": {"counted_tags": 3450, - "no_of_js_attached": 10, - "no_of_images": 6 - }, - "harvested_words": ["10gen", "web", "open", "source", "application", - "paas", "platform-as-a-service", "technology", - "helps", "developers", "focus", "building", - "mongodb", "mongo"] * 20 - } - - -def setup_insert(db, collection, object): - db.drop_collection(collection) - - -def insert(db, collection, object): - for i in range(per_trial): - to_insert = object.copy() - to_insert["x"] = i - db[collection].insert(to_insert) - - -def insert_batch(db, collection, object): - for i in range(per_trial / batch_size): - db[collection].insert([object] * batch_size) - - -def find_one(db, collection, x): - for _ in range(per_trial): - db[collection].find_one({"x": x}) - - -def find(db, collection, x): - for _ in range(per_trial): - for _ in db[collection].find({"x": x}): - pass - - -def timed(name, function, args=[], setup=None): - times = [] - for _ in range(trials): - if setup: - setup(*args) - start = time.time() - function(*args) - times.append(time.time() - start) - best_time = min(times) - print("{0:s}{1:d}".format(name + (60 - len(name)) * ".", per_trial / best_time)) - return best_time - - -def main(): - c = mongo_client.MongoClient(connectTimeoutMS=60*1000) # jack up timeout - c.drop_database("benchmark") - db = c.benchmark - - timed("insert (small, no index)", insert, - [db, 'small_none', small], setup_insert) - timed("insert (medium, no index)", insert, - [db, 'medium_none', medium], setup_insert) - timed("insert (large, no index)", insert, - [db, 'large_none', large], setup_insert) - - 
db.small_index.create_index("x", ASCENDING) - timed("insert (small, indexed)", insert, [db, 'small_index', small]) - db.medium_index.create_index("x", ASCENDING) - timed("insert (medium, indexed)", insert, [db, 'medium_index', medium]) - db.large_index.create_index("x", ASCENDING) - timed("insert (large, indexed)", insert, [db, 'large_index', large]) - - timed("batch insert (small, no index)", insert_batch, - [db, 'small_bulk', small], setup_insert) - timed("batch insert (medium, no index)", insert_batch, - [db, 'medium_bulk', medium], setup_insert) - timed("batch insert (large, no index)", insert_batch, - [db, 'large_bulk', large], setup_insert) - - timed("find_one (small, no index)", find_one, - [db, 'small_none', per_trial / 2]) - timed("find_one (medium, no index)", find_one, - [db, 'medium_none', per_trial / 2]) - timed("find_one (large, no index)", find_one, - [db, 'large_none', per_trial / 2]) - - timed("find_one (small, indexed)", find_one, - [db, 'small_index', per_trial / 2]) - timed("find_one (medium, indexed)", find_one, - [db, 'medium_index', per_trial / 2]) - timed("find_one (large, indexed)", find_one, - [db, 'large_index', per_trial / 2]) - - timed("find (small, no index)", find, [db, 'small_none', per_trial / 2]) - timed("find (medium, no index)", find, [db, 'medium_none', per_trial / 2]) - timed("find (large, no index)", find, [db, 'large_none', per_trial / 2]) - - timed("find (small, indexed)", find, [db, 'small_index', per_trial / 2]) - timed("find (medium, indexed)", find, [db, 'medium_index', per_trial / 2]) - timed("find (large, indexed)", find, [db, 'large_index', per_trial / 2]) - -# timed("find range (small, no index)", find, -# [db, 'small_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (medium, no index)", find, -# [db, 'medium_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (large, no index)", find, -# [db, 'large_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) - - timed("find range (small, indexed)", find, - [db, 'small_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (medium, indexed)", find, - [db, 'medium_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (large, indexed)", find, - [db, 'large_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - -if __name__ == "__main__": -# cProfile.run("main()") - main() diff --git a/tools/clean.py b/tools/clean.py index a5d383af4e..15db9a411b 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -16,30 +16,33 @@ Only really intended to be used by internal build scripts. 
""" +from __future__ import annotations -import os import sys +from pathlib import Path try: - os.remove("pymongo/_cmessage.so") - os.remove("bson/_cbson.so") -except: + Path("pymongo/_cmessage.so").unlink() + Path("bson/_cbson.so").unlink() +except BaseException: # noqa: S110 pass try: - os.remove("pymongo/_cmessage.pyd") - os.remove("bson/_cbson.pyd") -except: + Path("pymongo/_cmessage.pyd").unlink() + Path("bson/_cbson.pyd").unlink() +except BaseException: # noqa: S110 pass try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + sys.exit("could still import _cmessage") except ImportError: pass try: - from bson import _cbson + from bson import _cbson # noqa: F401 + sys.exit("could still import _cbson") except ImportError: pass diff --git a/tools/ensure_future_annotations_import.py b/tools/ensure_future_annotations_import.py new file mode 100644 index 0000000000..3e7e60bfd4 --- /dev/null +++ b/tools/ensure_future_annotations_import.py @@ -0,0 +1,41 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ensure that 'from __future__ import annotations' is used in all package files +""" +from __future__ import annotations + +import sys +from pathlib import Path + +pattern = "from __future__ import annotations" +missing = [] +for dirname in ["pymongo", "bson", "gridfs"]: + for path in Path(dirname).glob("*.py"): + if Path(path).name in ["_version.py", "errors.py"]: + continue + found = False + with open(path) as fid: + for line in fid.readlines(): + if line.strip() == pattern: + found = True + break + if not found: + missing.append(path) + +if missing: + print(f"Missing '{pattern}' import in:") # noqa: T201 + for item in missing: + print(item) # noqa: T201 + sys.exit(1) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index e6fd83a36b..2b59521c7d 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -16,12 +16,28 @@ Only really intended to be used by internal build scripts. 
""" +from __future__ import annotations +import os +import subprocess import sys +from pathlib import Path + sys.path[0:0] = [""] -import bson -import pymongo +import bson # noqa: E402 +import pymongo # noqa: E402 if not pymongo.has_c() or not bson.has_c(): sys.exit("could not load C extensions") + +if os.environ.get("ENSURE_UNIVERSAL2") == "1": + parent_dir = Path(pymongo.__path__[0]).parent + for pkg in ["pymongo", "bson", "grifs"]: + for so_file in Path(f"{parent_dir}/{pkg}").glob("*.so"): + print(f"Checking universal2 compatibility in {so_file}...") # noqa: T201 + output = subprocess.check_output(["file", so_file]) # noqa: S603, S607 + if "arm64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with arm64 support") + if "x86_64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with x86_64 support") diff --git a/tools/ocsptest.py b/tools/ocsptest.py new file mode 100644 index 0000000000..521d048f79 --- /dev/null +++ b/tools/ocsptest.py @@ -0,0 +1,61 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +from __future__ import annotations + +import argparse +import logging +import socket + +from pymongo.pyopenssl_context import SSLContext +from pymongo.ssl_support import get_ssl_context + +# Enable logs in this format: +# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" +logging.basicConfig(format=FORMAT, level=logging.DEBUG) + + +def check_ocsp(host: str, port: int, capath: str) -> None: + ctx = get_ssl_context( + None, # certfile + None, # passphrase + capath, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + ) # disable_ocsp_endpoint_check + + # Ensure we're using pyOpenSSL. + assert isinstance(ctx, SSLContext) + + s = socket.socket() + s.connect((host, port)) + try: + s = ctx.wrap_socket(s, server_hostname=host) # type: ignore[assignment] + finally: + s.close() + + +def main() -> None: + parser = argparse.ArgumentParser(description="Debug OCSP") + parser.add_argument("--host", type=str, required=True, help="Host to connect to") + parser.add_argument("-p", "--port", type=int, default=443, help="Port to connect to") + parser.add_argument("--ca_file", type=str, default=None, help="CA file for host") + args = parser.parse_args() + check_ocsp(args.host, args.port, args.ca_file) + + +if __name__ == "__main__": + main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..76c8700fef --- /dev/null +++ b/tox.ini @@ -0,0 +1,178 @@ +[tox] +requires = + tox>=4 + +envlist = + # Test using the system Python. + test, + # Test using the run-tests Evergreen script. + test-eg, + # Run pre-commit on all files. + lint, + # Run pre-commit on all files, including stages that require manual fixes. + lint-manual, + # Typecheck using mypy. + typecheck-mypy, + # Typecheck using pyright. + typecheck-pyright, + # Typecheck using pyright strict. 
+ typecheck-pyright-strict, + # Typecheck all files. + typecheck, + # Build sphinx docs. + doc, + # Test sphinx docs. + doc-test, + # Linkcheck sphinx docs. + linkcheck, + # Check the sdist integrity. + manifest + labels = # Use labels and -m instead of -e so that tox -m