diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 40f89e7bd4a..ba02f4308e0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -123,6 +123,8 @@ mypy.ini @DataDog/python-guild @DataDog/apm-core-pyt ddtrace/internal/_file_queue.py @DataDog/python-guild ddtrace/internal/_unpatched.py @DataDog/python-guild ddtrace/internal/compat.py @DataDog/python-guild @DataDog/apm-core-python +ddtrace/settings/config.py @DataDog/python-guild @DataDog/apm-sdk-api-python +docs/ @DataDog/python-guild tests/utils.py @DataDog/python-guild tests/.suitespec.json @DataDog/python-guild @DataDog/apm-core-python tests/suitespec.py @DataDog/python-guild @DataDog/apm-core-python diff --git a/.gitlab/build-oci.sh b/.gitlab/build-oci.sh index e68568c4972..86d5d57135f 100755 --- a/.gitlab/build-oci.sh +++ b/.gitlab/build-oci.sh @@ -43,6 +43,7 @@ cp ../lib-injection/sitecustomize.py $BUILD_DIR/ cp auto_inject-python.version $BUILD_DIR/version cp ../min_compatible_versions.csv $BUILD_DIR/ cp ../lib-injection/telemetry-forwarder.sh $BUILD_DIR/ +chmod -R +r $BUILD_DIR chmod -R o-w $BUILD_DIR chmod -R g-w $BUILD_DIR diff --git a/.gitlab/dogfood.yml b/.gitlab/dogfood.yml index b1019b1bf24..d3ec269a1df 100644 --- a/.gitlab/dogfood.yml +++ b/.gitlab/dogfood.yml @@ -1,24 +1,7 @@ -dogfood-dogweb-failed: - stage: dogfood - tags: ["arch:amd64"] - needs: ["dogfood-dogweb-trigger"] - when: on_failure - script: - - exit 0 - -dogfood-dogweb: - stage: dogfood - tags: ["arch:amd64"] - needs: ["dogfood-dogweb-trigger"] - when: on_success - script: - - exit 0 - dogfood-dogweb-trigger: stage: dogfood trigger: project: DataDog/dogweb - strategy: depend branch: emmett.butler/ddtrace-unstable-dogfooding allow_failure: true variables: diff --git a/.riot/requirements/12e2ec4.txt b/.riot/requirements/12e2ec4.txt new file mode 100644 index 00000000000..41e1e125d5d --- /dev/null +++ b/.riot/requirements/12e2ec4.txt @@ -0,0 +1,72 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/12e2ec4.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==24.4.2 +automat==22.10.0 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg==3.2.1 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/138818b.txt b/.riot/requirements/138818b.txt new file mode 100644 index 00000000000..3339fff8cba --- /dev/null +++ 
b/.riot/requirements/138818b.txt @@ -0,0 +1,74 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/138818b.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==24.4.2 +automat==22.10.0 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +exceptiongroup==1.2.2 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg==3.2.1 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +tomli==2.0.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/1692fb0.txt b/.riot/requirements/15c2a69.txt similarity index 68% rename from .riot/requirements/1692fb0.txt rename to .riot/requirements/15c2a69.txt index 2e83cc45a4d..9fcafcd70b1 100644 --- a/.riot/requirements/1692fb0.txt +++ b/.riot/requirements/15c2a69.txt @@ -2,57 +2,64 @@ # This file is autogenerated by pip-compile with Python 3.7 # by the following command: # -# pip-compile --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/1692fb0.in +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/15c2a69.in # arrow==1.2.3 asgiref==3.7.2 -attrs==23.2.0 +attrs==24.2.0 autobahn==23.1.2 automat==22.10.0 blessed==1.20.0 -certifi==2024.2.2 +cached-property==1.5.2 +certifi==2024.7.4 cffi==1.15.1 channels==4.0.0 charset-normalizer==3.3.2 constantly==15.1.0 coverage[toml]==7.2.7 -cryptography==42.0.3 +cryptography==43.0.0 daphne==4.0.0 -django==3.2.24 -django-picklefield==3.1 +django==3.2.25 +django-picklefield==3.2 django-pylibmc==0.6.1 django-q==1.3.6 django-redis==4.5.0 -exceptiongroup==1.2.0 +exceptiongroup==1.2.2 hyperlink==21.0.0 hypothesis==6.45.0 -idna==3.6 +idna==3.7 importlib-metadata==6.7.0 incremental==22.10.0 iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 +packaging==24.0 +platformdirs==4.0.0 pluggy==1.2.0 psycopg2-binary==2.9.9 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycparser==2.21 pylibmc==1.6.3 -pyopenssl==24.0.0 +pyopenssl==24.2.1 pytest==7.4.4 pytest-cov==4.1.0 pytest-django==3.10.0 pytest-mock==3.11.1 pytest-randomly==3.12.0 -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 python-memcached==1.62 pytz==2024.1 redis==2.10.6 requests==2.31.0 +requests-file==2.1.0 +requests-toolbelt==1.0.0 service-identity==21.1.0 six==1.16.0 sortedcontainers==2.4.0 +spyne==2.14.0 sqlparse==0.4.4 tomli==2.0.1 twisted[tls]==23.8.0 @@ -60,8 +67,9 @@ txaio==23.1.1 typing-extensions==4.7.1 urllib3==2.0.7 wcwidth==0.2.13 
+zeep==4.2.1 zipp==3.15.0 -zope-interface==6.2 +zope-interface==6.4.post2 # The following packages are considered to be unsafe in a requirements file: -# setuptools +setuptools==68.0.0 diff --git a/.riot/requirements/164c9d2.txt b/.riot/requirements/164c9d2.txt deleted file mode 100644 index 55284778172..00000000000 --- a/.riot/requirements/164c9d2.txt +++ /dev/null @@ -1,64 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/164c9d2.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.6.2 -automat==22.10.0 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg==3.1.18 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/173d698.txt b/.riot/requirements/173d698.txt new file mode 100644 index 00000000000..d34e2b4ec37 --- /dev/null +++ b/.riot/requirements/173d698.txt @@ -0,0 +1,76 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/173d698.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==23.1.2 +automat==22.10.0 +backports-zoneinfo==0.2.1 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +exceptiongroup==1.2.2 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +importlib-metadata==8.2.0 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +tomli==2.0.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zipp==3.20.0 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/19126e6.txt 
b/.riot/requirements/19126e6.txt deleted file mode 100644 index dd1588c6f57..00000000000 --- a/.riot/requirements/19126e6.txt +++ /dev/null @@ -1,64 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/19126e6.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.6.2 -automat==22.10.0 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg==3.1.18 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/1bc3637.txt b/.riot/requirements/1bc3637.txt deleted file mode 100644 index 7e9dbb44bfc..00000000000 --- a/.riot/requirements/1bc3637.txt +++ /dev/null @@ -1,67 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/1bc3637.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.6.2 -automat==22.10.0 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -exceptiongroup==1.2.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -tomli==2.0.1 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zipp==3.17.0 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/1f8c413.txt b/.riot/requirements/1f8c413.txt new file mode 100644 index 00000000000..7934f677e8f --- /dev/null +++ b/.riot/requirements/1f8c413.txt @@ -0,0 +1,76 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/1f8c413.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==24.4.2 +automat==22.10.0 +blessed==1.20.0 
+certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +exceptiongroup==1.2.2 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +importlib-metadata==8.2.0 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg==3.2.1 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +tomli==2.0.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zipp==3.20.0 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/204183a.txt b/.riot/requirements/204183a.txt deleted file mode 100644 index 7cd02b31dfa..00000000000 --- a/.riot/requirements/204183a.txt +++ /dev/null @@ -1,68 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/204183a.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.6.2 -automat==22.10.0 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -exceptiongroup==1.2.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg==3.1.18 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -tomli==2.0.1 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zipp==3.17.0 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/7f107f8.txt b/.riot/requirements/3e79269.txt similarity index 68% rename from .riot/requirements/7f107f8.txt rename to .riot/requirements/3e79269.txt index 0b2eba9cada..821ff542732 100644 --- a/.riot/requirements/7f107f8.txt +++ b/.riot/requirements/3e79269.txt @@ -2,57 +2,64 @@ # This file is autogenerated by pip-compile with Python 3.7 # by the following command: # -# pip-compile --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/7f107f8.in +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/3e79269.in # arrow==1.2.3 asgiref==3.7.2 -attrs==23.2.0 
+attrs==24.2.0 autobahn==23.1.2 automat==22.10.0 blessed==1.20.0 -certifi==2024.2.2 +cached-property==1.5.2 +certifi==2024.7.4 cffi==1.15.1 channels==3.0.5 charset-normalizer==3.3.2 constantly==15.1.0 coverage[toml]==7.2.7 -cryptography==42.0.3 +cryptography==43.0.0 daphne==3.0.2 -django==3.2.24 -django-picklefield==3.1 +django==3.2.25 +django-picklefield==3.2 django-pylibmc==0.6.1 django-q==1.3.6 django-redis==4.5.0 -exceptiongroup==1.2.0 +exceptiongroup==1.2.2 hyperlink==21.0.0 hypothesis==6.45.0 -idna==3.6 +idna==3.7 importlib-metadata==6.7.0 incremental==22.10.0 iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 +packaging==24.0 +platformdirs==4.0.0 pluggy==1.2.0 psycopg2-binary==2.9.9 pyasn1==0.5.1 pyasn1-modules==0.3.0 pycparser==2.21 pylibmc==1.6.3 -pyopenssl==24.0.0 +pyopenssl==24.2.1 pytest==7.4.4 pytest-cov==4.1.0 pytest-django==3.10.0 pytest-mock==3.11.1 pytest-randomly==3.12.0 -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 python-memcached==1.62 pytz==2024.1 redis==2.10.6 requests==2.31.0 +requests-file==2.1.0 +requests-toolbelt==1.0.0 service-identity==21.1.0 six==1.16.0 sortedcontainers==2.4.0 +spyne==2.14.0 sqlparse==0.4.4 tomli==2.0.1 twisted[tls]==23.8.0 @@ -60,8 +67,9 @@ txaio==23.1.1 typing-extensions==4.7.1 urllib3==2.0.7 wcwidth==0.2.13 +zeep==4.2.1 zipp==3.15.0 -zope-interface==6.2 +zope-interface==6.4.post2 # The following packages are considered to be unsafe in a requirements file: -# setuptools +setuptools==68.0.0 diff --git a/.riot/requirements/7b17c71.txt b/.riot/requirements/7b17c71.txt new file mode 100644 index 00000000000..7a7faef114e --- /dev/null +++ b/.riot/requirements/7b17c71.txt @@ -0,0 +1,77 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/7b17c71.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==23.1.2 +automat==22.10.0 +backports-zoneinfo==0.2.1 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +exceptiongroup==1.2.2 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +importlib-metadata==8.2.0 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg==3.2.1 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +tomli==2.0.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zipp==3.20.0 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/8d1d496.txt b/.riot/requirements/8d1d496.txt deleted file mode 100644 index c87538ebe03..00000000000 --- a/.riot/requirements/8d1d496.txt +++ /dev/null @@ -1,66 +0,0 @@ -# -# This file is 
autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/8d1d496.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.6.2 -automat==22.10.0 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -exceptiongroup==1.2.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg==3.1.18 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -tomli==2.0.1 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/9b47188.txt b/.riot/requirements/9b47188.txt new file mode 100644 index 00000000000..65654ab8459 --- /dev/null +++ b/.riot/requirements/9b47188.txt @@ -0,0 +1,75 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/9b47188.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==24.4.2 +automat==22.10.0 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +exceptiongroup==1.2.2 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +importlib-metadata==8.2.0 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +tomli==2.0.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zipp==3.20.0 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/c126367.txt b/.riot/requirements/c126367.txt deleted file mode 100644 index 4b8ed876505..00000000000 --- a/.riot/requirements/c126367.txt +++ /dev/null @@ -1,68 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/c126367.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.1.2 -automat==22.10.0 
-backports-zoneinfo==0.2.1 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 -django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -exceptiongroup==1.2.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -tomli==2.0.1 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zipp==3.17.0 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/.riot/requirements/e701565.txt b/.riot/requirements/e701565.txt new file mode 100644 index 00000000000..9068f50a4f2 --- /dev/null +++ b/.riot/requirements/e701565.txt @@ -0,0 +1,72 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e701565.in +# +arrow==1.3.0 +asgiref==3.8.1 +attrs==24.2.0 +autobahn==24.4.2 +automat==22.10.0 +blessed==1.20.0 +certifi==2024.7.4 +cffi==1.17.0 +channels==4.1.0 +charset-normalizer==3.3.2 +constantly==23.10.4 +coverage[toml]==7.6.1 +cryptography==43.0.0 +daphne==4.1.2 +django==4.2.15 +django-picklefield==3.2 +django-pylibmc==0.6.1 +django-q==1.3.6 +django-redis==4.5.0 +hyperlink==21.0.0 +hypothesis==6.45.0 +idna==3.7 +incremental==24.7.2 +iniconfig==2.0.0 +isodate==0.6.1 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.2.2 +pluggy==1.5.0 +psycopg==3.2.1 +psycopg2-binary==2.9.9 +pyasn1==0.6.0 +pyasn1-modules==0.4.0 +pycparser==2.22 +pylibmc==1.6.3 +pyopenssl==24.2.1 +pytest==8.3.2 +pytest-cov==5.0.0 +pytest-django==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +python-memcached==1.62 +pytz==2024.1 +redis==2.10.6 +requests==2.32.3 +requests-file==2.1.0 +requests-toolbelt==1.0.0 +service-identity==24.1.0 +six==1.16.0 +sortedcontainers==2.4.0 +spyne==2.14.0 +sqlparse==0.5.1 +twisted[tls]==24.7.0 +txaio==23.1.1 +types-python-dateutil==2.9.0.20240316 +typing-extensions==4.12.2 +urllib3==2.2.2 +wcwidth==0.2.13 +zeep==4.2.1 +zope-interface==7.0.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==72.2.0 diff --git a/.riot/requirements/f3e34f7.txt b/.riot/requirements/f3e34f7.txt deleted file mode 100644 index ca3ec955011..00000000000 --- a/.riot/requirements/f3e34f7.txt +++ /dev/null @@ -1,69 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.8 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/f3e34f7.in -# -arrow==1.3.0 -asgiref==3.7.2 -attrs==23.2.0 -autobahn==23.1.2 -automat==22.10.0 -backports-zoneinfo==0.2.1 -blessed==1.20.0 -certifi==2024.2.2 -cffi==1.16.0 -channels==4.0.0 -charset-normalizer==3.3.2 -constantly==23.10.4 -coverage[toml]==7.4.1 -cryptography==42.0.3 -daphne==4.1.0 -django==4.2.10 -django-picklefield==3.1 
-django-pylibmc==0.6.1 -django-q==1.3.6 -django-redis==4.5.0 -exceptiongroup==1.2.0 -hyperlink==21.0.0 -hypothesis==6.45.0 -idna==3.6 -importlib-metadata==7.0.1 -incremental==22.10.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.4.0 -psycopg==3.1.18 -psycopg2-binary==2.9.9 -pyasn1==0.5.1 -pyasn1-modules==0.3.0 -pycparser==2.21 -pylibmc==1.6.3 -pyopenssl==24.0.0 -pytest==8.0.0 -pytest-cov==4.1.0 -pytest-django==3.10.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 -python-dateutil==2.8.2 -python-memcached==1.62 -redis==2.10.6 -requests==2.31.0 -service-identity==24.1.0 -six==1.16.0 -sortedcontainers==2.4.0 -sqlparse==0.4.4 -tomli==2.0.1 -twisted[tls]==23.10.0 -txaio==23.1.1 -types-python-dateutil==2.8.19.20240106 -typing-extensions==4.9.0 -urllib3==2.2.0 -wcwidth==0.2.13 -zipp==3.17.0 -zope-interface==6.2 - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/ddtrace/_trace/trace_handlers.py b/ddtrace/_trace/trace_handlers.py index 3fee65b575e..198627645f1 100644 --- a/ddtrace/_trace/trace_handlers.py +++ b/ddtrace/_trace/trace_handlers.py @@ -487,10 +487,12 @@ def _on_django_finalize_response_pre(ctx, after_request_tags, request, response) def _on_django_start_response( - ctx, request, extract_body: Callable, query: str, uri: str, path: Optional[Dict[str, str]] + ctx, request, extract_body: Callable, remake_body: Callable, query: str, uri: str, path: Optional[Dict[str, str]] ): parsed_query = request.GET body = extract_body(request) + remake_body(request) + trace_utils.set_http_meta( ctx["call"], ctx["distributed_headers_config"], diff --git a/ddtrace/appsec/_ddwaf/ddwaf_types.py b/ddtrace/appsec/_ddwaf/ddwaf_types.py index ad5ce493121..d950a8eedbd 100644 --- a/ddtrace/appsec/_ddwaf/ddwaf_types.py +++ b/ddtrace/appsec/_ddwaf/ddwaf_types.py @@ -181,7 +181,7 @@ def create_without_limits(cls, struct: DDWafRulesType) -> "ddwaf_object": def struct(self) -> DDWafRulesType: """Generate a python structure from ddwaf_object""" if self.type == DDWAF_OBJ_TYPE.DDWAF_OBJ_STRING: - return self.value.stringValue.decode("UTF-8", errors="ignore") + return self.value.stringValue[: self.nbEntries].decode("UTF-8", errors="ignore") if self.type == DDWAF_OBJ_TYPE.DDWAF_OBJ_MAP: return { self.value.array[i].parameterName.decode("UTF-8", errors="ignore"): self.value.array[i].struct @@ -211,7 +211,7 @@ def __repr__(self): class ddwaf_value(ctypes.Union): _fields_ = [ - ("stringValue", ctypes.c_char_p), + ("stringValue", ctypes.POINTER(ctypes.c_char)), ("uintValue", ctypes.c_ulonglong), ("intValue", ctypes.c_longlong), ("array", ddwaf_object_p), diff --git a/ddtrace/appsec/_iast/_ast/ast_patching.py b/ddtrace/appsec/_iast/_ast/ast_patching.py index a48e5580681..9760e936f0b 100644 --- a/ddtrace/appsec/_iast/_ast/ast_patching.py +++ b/ddtrace/appsec/_iast/_ast/ast_patching.py @@ -39,6 +39,7 @@ "ddsketch", "ddtrace", "encodings", # this package is used to load encodings when a module is imported, propagation is not needed + "encodings.idna", "envier", "exceptiongroup", "freezegun", # Testing utilities for time manipulation diff --git a/ddtrace/appsec/_iast/_taint_tracking/Utils/StringUtils.cpp b/ddtrace/appsec/_iast/_taint_tracking/Utils/StringUtils.cpp index 589bd7c18df..1d41f71e1e4 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/Utils/StringUtils.cpp +++ b/ddtrace/appsec/_iast/_taint_tracking/Utils/StringUtils.cpp @@ -78,31 +78,65 @@ PyObjectToString(PyObject* obj) PyObject* new_pyobject_id(PyObject* tainted_object) { + if 
(!tainted_object) + return nullptr; + if (PyUnicode_Check(tainted_object)) { PyObject* empty_unicode = PyUnicode_New(0, 127); + if (!empty_unicode) + return tainted_object; PyObject* val = Py_BuildValue("(OO)", tainted_object, empty_unicode); + if (!val) { + Py_XDECREF(empty_unicode); + return tainted_object; + } PyObject* result = PyUnicode_Join(empty_unicode, val); - Py_DecRef(empty_unicode); - Py_DecRef(val); + if (!result) { + result = tainted_object; + } + Py_XDECREF(empty_unicode); + Py_XDECREF(val); return result; } if (PyBytes_Check(tainted_object)) { PyObject* empty_bytes = PyBytes_FromString(""); + if (!empty_bytes) + return tainted_object; + const auto bytes_join_ptr = py::reinterpret_borrow(empty_bytes).attr("join"); const auto val = Py_BuildValue("(OO)", tainted_object, empty_bytes); + if (!val or !bytes_join_ptr.ptr()) { + Py_XDECREF(empty_bytes); + return tainted_object; + } + const auto res = PyObject_CallFunctionObjArgs(bytes_join_ptr.ptr(), val, NULL); - Py_DecRef(val); - Py_DecRef(empty_bytes); + Py_XDECREF(val); + Py_XDECREF(empty_bytes); return res; } else if (PyByteArray_Check(tainted_object)) { PyObject* empty_bytes = PyBytes_FromString(""); + if (!empty_bytes) + return tainted_object; + PyObject* empty_bytearray = PyByteArray_FromObject(empty_bytes); + if (!empty_bytearray) { + Py_XDECREF(empty_bytes); + return tainted_object; + } + const auto bytearray_join_ptr = py::reinterpret_borrow(empty_bytearray).attr("join"); const auto val = Py_BuildValue("(OO)", tainted_object, empty_bytearray); + if (!val or !bytearray_join_ptr.ptr()) { + Py_XDECREF(empty_bytes); + Py_XDECREF(empty_bytearray); + return tainted_object; + } + const auto res = PyObject_CallFunctionObjArgs(bytearray_join_ptr.ptr(), val, NULL); - Py_DecRef(val); - Py_DecRef(empty_bytes); - Py_DecRef(empty_bytearray); + Py_XDECREF(val); + Py_XDECREF(empty_bytes); + Py_XDECREF(empty_bytearray); return res; } return tainted_object; diff --git a/ddtrace/contrib/django/patch.py b/ddtrace/contrib/django/patch.py index 0b94a4e3fed..34a41ba1d3d 100644 --- a/ddtrace/contrib/django/patch.py +++ b/ddtrace/contrib/django/patch.py @@ -527,7 +527,9 @@ def blocked_response(): except Exception: path = None - core.dispatch("django.start_response", (ctx, request, utils._extract_body, query, uri, path)) + core.dispatch( + "django.start_response", (ctx, request, utils._extract_body, utils._remake_body, query, uri, path) + ) core.dispatch("django.start_response.post", ("Django",)) if core.get_item(HTTP_REQUEST_BLOCKED): diff --git a/ddtrace/contrib/django/utils.py b/ddtrace/contrib/django/utils.py index ed38328a4c6..eaec377d7bc 100644 --- a/ddtrace/contrib/django/utils.py +++ b/ddtrace/contrib/django/utils.py @@ -1,3 +1,4 @@ +import io import json from typing import Any # noqa:F401 from typing import Dict # noqa:F401 @@ -286,6 +287,19 @@ def _extract_body(request): return req_body +def _remake_body(request): + # some libraries built on Django (e.g. Spyne) require the body stream to be unread, or else they will throw errors + # see: https://github.com/arskom/spyne/blob/f105ec2f41495485fef1211fe73394231b3f76e5/spyne/server/wsgi.py#L538 + if request.method in _BODY_METHODS: + try: + unread_body = io.BytesIO(request._body) + if unread_body.seekable(): + unread_body.seek(0) + request.META["wsgi.input"] = unread_body + except Exception: + log.debug("Failed to remake Django request body", exc_info=True) + + def _get_request_headers(request): # type: (Any) -> Mapping[str, str] if DJANGO22: diff --git a/ddtrace/contrib/openai/patch.py 
b/ddtrace/contrib/openai/patch.py index 5e3bf2caead..a610cf55771 100644 --- a/ddtrace/contrib/openai/patch.py +++ b/ddtrace/contrib/openai/patch.py @@ -294,7 +294,12 @@ async def patched_endpoint(func, args, kwargs): raise finally: try: - g.send((resp, err)) + if resp is not None: + # openai responses cannot be None + # if resp is None, it is likely because the context + # of the request was cancelled, so we want that to propagate up properly + # see: https://github.com/DataDog/dd-trace-py/issues/10191 + g.send((resp, err)) except StopIteration as e: if err is None: # This return takes priority over `return resp` diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py index 44bb79c91b4..d63eadf1135 100644 --- a/ddtrace/contrib/redis/patch.py +++ b/ddtrace/contrib/redis/patch.py @@ -125,6 +125,7 @@ def _run_redis_command(ctx: core.ExecutionContext, func, args, kwargs): parsed_command = stringify_cache_args(args) redis_command = parsed_command.split(" ")[0] rowcount = None + result = None try: result = func(*args, **kwargs) return result diff --git a/ddtrace/contrib/tornado/handlers.py b/ddtrace/contrib/tornado/handlers.py index 04877dde2d5..a16ec4e993c 100644 --- a/ddtrace/contrib/tornado/handlers.py +++ b/ddtrace/contrib/tornado/handlers.py @@ -58,7 +58,8 @@ def execute(func, handler, args, kwargs): request_span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, settings.get("analytics_sample_rate", True)) http_route = _find_route(handler.application.default_router.rules, handler.request) - request_span.set_tag_str("http.route", http_route) + if http_route is not None and isinstance(http_route, str): + request_span.set_tag_str("http.route", http_route) setattr(handler.request, REQUEST_SPAN_KEY, request_span) return func(*args, **kwargs) diff --git a/ddtrace/internal/ci_visibility/recorder.py b/ddtrace/internal/ci_visibility/recorder.py index 91d4aa7d89c..6a3121b35e0 100644 --- a/ddtrace/internal/ci_visibility/recorder.py +++ b/ddtrace/internal/ci_visibility/recorder.py @@ -3,6 +3,7 @@ import json import os from pathlib import Path +import re import socket from typing import TYPE_CHECKING # noqa:F401 from typing import NamedTuple # noqa:F401 @@ -102,12 +103,17 @@ class CIVisibilityAuthenticationException(Exception): pass -def _extract_repository_name_from_url(repository_url): - # type: (str) -> str +def _extract_repository_name_from_url(repository_url: str) -> str: + _REPO_NAME_REGEX = r".*/(?P.*?)(\.git)?$" + try: - return parse.urlparse(repository_url).path.rstrip(".git").rpartition("/")[-1] + url_path = parse.urlparse(repository_url).path + matches = re.match(_REPO_NAME_REGEX, url_path, flags=re.IGNORECASE) + if matches: + return matches.group("repo_name") + log.warning("Cannot extract repository name from unexpected URL path: %s", url_path) + return repository_url except ValueError: - # In case of parsing error, default to repository url log.warning("Repository name cannot be parsed from repository_url: %s", repository_url) return repository_url diff --git a/ddtrace/internal/core/__init__.py b/ddtrace/internal/core/__init__.py index 0fe4cb1b9ef..249e099c7f2 100644 --- a/ddtrace/internal/core/__init__.py +++ b/ddtrace/internal/core/__init__.py @@ -100,6 +100,7 @@ def _on_jsonify_context_started_flask(ctx): The names of these events follow the pattern ``context.[started|ended].``. """ + from contextlib import contextmanager import logging import sys @@ -115,7 +116,6 @@ def _on_jsonify_context_started_flask(ctx): from ..utils.deprecations import DDTraceDeprecationWarning from . 
import event_hub # noqa:F401 -from ._core import RateLimiter # noqa:F401 from .event_hub import EventResultDict # noqa:F401 from .event_hub import dispatch from .event_hub import dispatch_with_results # noqa:F401 diff --git a/ddtrace/internal/core/_core.pyi b/ddtrace/internal/core/_core.pyi index 48ec6baf707..e69de29bb2d 100644 --- a/ddtrace/internal/core/_core.pyi +++ b/ddtrace/internal/core/_core.pyi @@ -1,49 +0,0 @@ -import typing - -class RateLimiter: - """ - A token bucket rate limiter implementation - """ - - rate_limit: int - time_window: float - effective_rate: float - current_window_rate: float - prev_window_rate: typing.Optional[float] - tokens: float - max_tokens: float - tokens_allowed: int - tokens_total: int - last_update_ns: float - current_window_ns: float - - def __init__(self, rate_limit: int, time_window: float = 1e9): - """ - Constructor for RateLimiter - - :param rate_limit: The rate limit to apply for number of requests per second. - rate limit > 0 max number of requests to allow per second, - rate limit == 0 to disallow all requests, - rate limit < 0 to allow all requests - :type rate_limit: :obj:`int` - :param time_window: The time window where the rate limit applies in nanoseconds. default value is 1 second. - :type time_window: :obj:`float` - """ - def is_allowed(self, timestamp_ns: typing.Optional[int] = None) -> bool: - """ - Check whether the current request is allowed or not - - This method will also reduce the number of available tokens by 1 - - :param int timestamp_ns: timestamp in nanoseconds for the current request. [deprecated] - :returns: Whether the current request is allowed or not - :rtype: :obj:`bool` - """ - def _is_allowed(self, timestamp_ns: int) -> bool: - """ - Internal method to check whether the current request is allowed or not - - :param int timestamp_ns: timestamp in nanoseconds for the current request. - :returns: Whether the current request is allowed or not - :rtype: :obj:`bool` - """ diff --git a/ddtrace/internal/datadog/profiling/ddup/__init__.py b/ddtrace/internal/datadog/profiling/ddup/__init__.py index 32bd273c5a4..1a1c9ebe7a4 100644 --- a/ddtrace/internal/datadog/profiling/ddup/__init__.py +++ b/ddtrace/internal/datadog/profiling/ddup/__init__.py @@ -1,4 +1,9 @@ -from .utils import sanitize_string # noqa: F401 +# This module supports an optional feature. It may not even load on all platforms or configurations. +# In ddtrace/settings/profiling.py, this module is imported and the is_available attribute is checked to determine +# whether the feature is available. If not, then the feature is disabled and all downstream consumption is +# suppressed. 
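The comment above describes a guard-and-check pattern: the module sets `is_available` at import time and consumers gate features on it. A minimal consumer-side sketch, assuming only the `is_available` and `failure_msg` attributes this diff introduces (the real gating in ddtrace/settings/profiling.py may differ):

```python
# Hypothetical consumer of the import guard above; illustrative, not the shipped code.
from ddtrace.internal.datadog.profiling import ddup
from ddtrace.internal.logger import get_logger

log = get_logger(__name__)


def libdd_export_enabled(requested: bool) -> bool:
    """Enable libdd export only when the native module actually loaded."""
    if requested and not ddup.is_available:
        # The import guard stashed the loader error instead of raising it,
        # so this is the place to surface why the feature was disabled.
        log.warning("libdd exporter requested but unavailable: %s", ddup.failure_msg)
        return False
    return requested
```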
+is_available = False +failure_msg = "" try: @@ -7,89 +12,4 @@ is_available = True except Exception as e: - from typing import Dict # noqa:F401 - from typing import Optional # noqa:F401 - - from ddtrace.internal.logger import get_logger - - LOG = get_logger(__name__) - LOG.debug("Failed to import _ddup: %s", e) - - is_available = False - - # Decorator for not-implemented - def not_implemented(func): - def wrapper(*args, **kwargs): - raise NotImplementedError("{} is not implemented on this platform".format(func.__name__)) - - @not_implemented - def init( - env, # type: Optional[str] - service, # type: Optional[str] - version, # type: Optional[str] - tags, # type: Optional[Dict[str, str]] - max_nframes, # type: Optional[int] - url, # type: Optional[str] - ): - pass - - @not_implemented - def upload(): # type: () -> None - pass - - class SampleHandle: - @not_implemented - def push_cputime(self, value, count): # type: (int, int) -> None - pass - - @not_implemented - def push_walltime(self, value, count): # type: (int, int) -> None - pass - - @not_implemented - def push_acquire(self, value, count): # type: (int, int) -> None - pass - - @not_implemented - def push_release(self, value, count): # type: (int, int) -> None - pass - - @not_implemented - def push_alloc(self, value, count): # type: (int, int) -> None - pass - - @not_implemented - def push_heap(self, value): # type: (int) -> None - pass - - @not_implemented - def push_lock_name(self, lock_name): # type: (str) -> None - pass - - @not_implemented - def push_frame(self, name, filename, address, line): # type: (str, str, int, int) -> None - pass - - @not_implemented - def push_threadinfo(self, thread_id, thread_native_id, thread_name): # type: (int, int, Optional[str]) -> None - pass - - @not_implemented - def push_taskinfo(self, task_id, task_name): # type: (int, str) -> None - pass - - @not_implemented - def push_exceptioninfo(self, exc_type, count): # type: (type, int) -> None - pass - - @not_implemented - def push_class_name(self, class_name): # type: (str) -> None - pass - - @not_implemented - def push_span(self, span, endpoint_collection_enabled): # type: (Optional[Span], bool) -> None - pass - - @not_implemented - def flush_sample(self): # type: () -> None - pass + failure_msg = str(e) diff --git a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx index a80ae0a8d81..4179bbda283 100644 --- a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx +++ b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx @@ -280,10 +280,10 @@ cdef class SampleHandle: span_type_bytes = ensure_binary_or_empty(span._local_root.span_type) ddup_push_trace_type(self.ptr, string_view(span_type_bytes, len(span_type_bytes))) if endpoint_collection_enabled: - root_service_bytes = ensure_binary_or_empty(span._local_root.service) + root_resource_bytes = ensure_binary_or_empty(span._local_root.resource) ddup_push_trace_resource_container( self.ptr, - string_view(root_service_bytes, len(root_service_bytes)) + string_view(root_resource_bytes, len(root_resource_bytes)) ) def push_monotonic_ns(self, monotonic_ns: int) -> None: diff --git a/ddtrace/internal/datadog/profiling/stack_v2/__init__.py b/ddtrace/internal/datadog/profiling/stack_v2/__init__.py index 8a8484e6950..399906e115d 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/__init__.py +++ b/ddtrace/internal/datadog/profiling/stack_v2/__init__.py @@ -1,34 +1,13 @@ +# See ../ddup/__init__.py for some discussion on the is_available attribute. 
+# This component is also loaded in ddtrace/settings/profiling.py is_available = False - - -# Decorator for not-implemented -def not_implemented(func): - def wrapper(*args, **kwargs): - raise NotImplementedError("{} is not implemented on this platform".format(func.__name__)) - - -@not_implemented -def start(*args, **kwargs): - pass - - -@not_implemented -def stop(*args, **kwargs): - pass - - -@not_implemented -def set_interval(*args, **kwargs): - pass +failure_msg = "" try: - from ._stack_v2 import * # noqa: F401, F403 + from ._stack_v2 import * # noqa: F403, F401 is_available = True -except Exception as e: - from ddtrace.internal.logger import get_logger - LOG = get_logger(__name__) - - LOG.debug("Failed to import _stack_v2: %s", e) +except Exception as e: + failure_msg = str(e) diff --git a/ddtrace/internal/rate_limiter.py b/ddtrace/internal/rate_limiter.py index cb25e3ed12f..cf04aa8bde8 100644 --- a/ddtrace/internal/rate_limiter.py +++ b/ddtrace/internal/rate_limiter.py @@ -13,24 +13,174 @@ from ..internal import compat from ..internal.constants import DEFAULT_SAMPLING_RATE_LIMIT -from .core import RateLimiter as _RateLimiter -class RateLimiter(_RateLimiter): +class RateLimiter(object): + """ + A token bucket rate limiter implementation + """ + + __slots__ = ( + "_lock", + "current_window_ns", + "time_window", + "last_update_ns", + "max_tokens", + "prev_window_rate", + "rate_limit", + "tokens", + "tokens_allowed", + "tokens_total", + ) + + def __init__(self, rate_limit: int, time_window: float = 1e9): + """ + Constructor for RateLimiter + + :param rate_limit: The rate limit to apply for number of requests per second. + rate limit > 0 max number of requests to allow per second, + rate limit == 0 to disallow all requests, + rate limit < 0 to allow all requests + :type rate_limit: :obj:`int` + :param time_window: The time window where the rate limit applies in nanoseconds. default value is 1 second. + :type time_window: :obj:`float` + """ + self.rate_limit = rate_limit + self.time_window = time_window + self.tokens = rate_limit # type: float + self.max_tokens = rate_limit + + self.last_update_ns = compat.monotonic_ns() + + self.current_window_ns = 0 # type: float + self.tokens_allowed = 0 + self.tokens_total = 0 + self.prev_window_rate = None # type: Optional[float] + + self._lock = threading.Lock() + @property def _has_been_configured(self): return self.rate_limit != DEFAULT_SAMPLING_RATE_LIMIT def is_allowed(self, timestamp_ns: Optional[int] = None) -> bool: + """ + Check whether the current request is allowed or not + + This method will also reduce the number of available tokens by 1 + + :param int timestamp_ns: timestamp in nanoseconds for the current request. + :returns: Whether the current request is allowed or not + :rtype: :obj:`bool` + """ if timestamp_ns is not None: deprecate( "The `timestamp_ns` parameter is deprecated and will be removed in a future version." 
"Ratelimiter will use the current time.", category=DDTraceDeprecationWarning, ) + # rate limits are tested and mocked in pytest so we need to compute the timestamp here # (or move the unit tests to rust) - return self._is_allowed(compat.monotonic_ns()) + timestamp_ns = timestamp_ns or compat.monotonic_ns() + allowed = self._is_allowed(timestamp_ns) + # Update counts used to determine effective rate + self._update_rate_counts(allowed, timestamp_ns) + return allowed + + def _update_rate_counts(self, allowed: bool, timestamp_ns: int) -> None: + # No tokens have been seen yet, start a new window + if not self.current_window_ns: + self.current_window_ns = timestamp_ns + + # If more time than the configured time window + # has past since last window, reset + # DEV: We are comparing nanoseconds, so 1e9 is 1 second + elif timestamp_ns - self.current_window_ns >= self.time_window: + # Store previous window's rate to average with current for `.effective_rate` + self.prev_window_rate = self._current_window_rate() + self.tokens_allowed = 0 + self.tokens_total = 0 + self.current_window_ns = timestamp_ns + + # Keep track of total tokens seen vs allowed + if allowed: + self.tokens_allowed += 1 + self.tokens_total += 1 + + def _is_allowed(self, timestamp_ns: int) -> bool: + # Rate limit of 0 blocks everything + if self.rate_limit == 0: + return False + + # Negative rate limit disables rate limiting + elif self.rate_limit < 0: + return True + + # Lock, we need this to be thread safe, it should be shared by all threads + with self._lock: + self._replenish(timestamp_ns) + + if self.tokens >= 1: + self.tokens -= 1 + return True + + return False + + def _replenish(self, timestamp_ns: int) -> None: + try: + # If we are at the max, we do not need to add any more + if self.tokens == self.max_tokens: + return + + # Add more available tokens based on how much time has passed + # DEV: We store as nanoseconds, convert to seconds + elapsed = (timestamp_ns - self.last_update_ns) / self.time_window + finally: + # always update the timestamp + # we can't update at the beginning of the function, since if we did, our calculation for + # elapsed would be incorrect + self.last_update_ns = timestamp_ns + + # Update the number of available tokens, but ensure we do not exceed the max + self.tokens = min( + self.max_tokens, + self.tokens + (elapsed * self.rate_limit), + ) + + def _current_window_rate(self) -> float: + # No tokens have been seen, effectively 100% sample rate + # DEV: This is to avoid division by zero error + if not self.tokens_total: + return 1.0 + + # Get rate of tokens allowed + return self.tokens_allowed / self.tokens_total + + @property + def effective_rate(self) -> float: + """ + Return the effective sample rate of this rate limiter + + :returns: Effective sample rate value 0.0 <= rate <= 1.0 + :rtype: :obj:`float`` + """ + # If we have not had a previous window yet, return current rate + if self.prev_window_rate is None: + return self._current_window_rate() + + return (self._current_window_rate() + self.prev_window_rate) / 2.0 + + def __repr__(self): + return "{}(rate_limit={!r}, tokens={!r}, last_update_ns={!r}, effective_rate={!r})".format( + self.__class__.__name__, + self.rate_limit, + self.tokens, + self.last_update_ns, + self.effective_rate, + ) + + __str__ = __repr__ class RateLimitExceeded(Exception): diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index 1e167b085fd..c5834abbecb 100644 --- a/ddtrace/internal/telemetry/writer.py +++ 
b/ddtrace/internal/telemetry/writer.py @@ -121,7 +121,7 @@ def _get_agentless_telemetry_url(site: str): if site == "datad0g.com": return "https://all-http-intake.logs.datad0g.com" if site == "datadoghq.eu": - return "https://instrumentation-telemetry-intake.eu1.datadoghq.com" + return "https://instrumentation-telemetry-intake.datadoghq.eu" return f"https://instrumentation-telemetry-intake.{site}/" diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py index a23456a35d9..2bd84764bd1 100644 --- a/ddtrace/llmobs/_integrations/openai.py +++ b/ddtrace/llmobs/_integrations/openai.py @@ -94,11 +94,13 @@ def _logs_tags(cls, span: Span) -> str: @classmethod def _metrics_tags(cls, span: Span) -> List[str]: + model_name = span.get_tag("openai.request.model") or "" tags = [ "version:%s" % (config.version or ""), "env:%s" % (config.env or ""), "service:%s" % (span.service or ""), - "openai.request.model:%s" % (span.get_tag("openai.request.model") or ""), + "openai.request.model:%s" % model_name, + "model:%s" % model_name, "openai.request.endpoint:%s" % (span.get_tag("openai.request.endpoint") or ""), "openai.request.method:%s" % (span.get_tag("openai.request.method") or ""), "openai.organization.id:%s" % (span.get_tag("openai.organization.id") or ""), diff --git a/ddtrace/opentelemetry/_span.py b/ddtrace/opentelemetry/_span.py index f4cf456bfc7..bd2b0dfe83b 100644 --- a/ddtrace/opentelemetry/_span.py +++ b/ddtrace/opentelemetry/_span.py @@ -10,6 +10,7 @@ from opentelemetry.trace.span import TraceState from ddtrace import config +from ddtrace import tracer as ddtracer from ddtrace.constants import ERROR_MSG from ddtrace.constants import ERROR_STACK from ddtrace.constants import ERROR_TYPE @@ -136,13 +137,27 @@ def kind(self): def get_span_context(self): # type: () -> SpanContext """Returns an OpenTelemetry SpanContext""" - ts = None - tf = TraceFlags.DEFAULT - if self._ddspan.context: - ts_str = w3c_tracestate_add_p(self._ddspan.context._tracestate, self._ddspan.span_id) - ts = TraceState.from_header([ts_str]) - if self._ddspan.context.sampling_priority and self._ddspan.context.sampling_priority > 0: - tf = TraceFlags.SAMPLED + if self._ddspan.context.sampling_priority is None: + # With the introduction of lazy sampling, spans are now sampled on serialization. With this change, + # a span's trace flags could be propagated before a sampling + # decision is made. Since the default sampling decision is to not sample spans, this can result + # in missing spans. To resolve this, a sampling decision must be made the first time + # the span context is accessed. 
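To make the hazard just described concrete before the hunk continues: propagators copy the trace flags out of `get_span_context()` when they build the W3C `traceparent` header, so a premature "not sampled" default is inherited by every downstream service. A minimal sketch, assuming a hypothetical `span` that exposes an OTel-style `SpanContext` (not dd-trace-py internals):

```python
# Illustrative propagation path; names and flow are assumptions.
def inject_traceparent(span, headers: dict) -> None:
    ctx = span.get_span_context()  # before this patch, this could run pre-sampling
    # traceparent = version-trace_id-span_id-flags; the low bit of the flags
    # byte is the W3C "sampled" bit, so a premature default of 0x00 here tells
    # downstream services to drop their spans as well.
    headers["traceparent"] = "00-{:032x}-{:016x}-{:02x}".format(
        ctx.trace_id, ctx.span_id, int(ctx.trace_flags)
    )
```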
+ ddtracer.sample(self._ddspan._local_root or self._ddspan) + + if self._ddspan.context.sampling_priority is None: + tf = TraceFlags.DEFAULT + log.warning( + "Span context is missing a sampling decision, defaulting to unsampled: %s", str(self._ddspan.context) + ) + elif self._ddspan.context.sampling_priority > 0: + tf = TraceFlags.SAMPLED + else: + tf = TraceFlags.DEFAULT + + # Evaluate the tracestate header after the sampling decision has been made + ts_str = w3c_tracestate_add_p(self._ddspan.context._tracestate, self._ddspan.span_id) + ts = TraceState.from_header([ts_str]) return SpanContext(self._ddspan.trace_id, self._ddspan.span_id, False, tf, ts) diff --git a/ddtrace/profiling/collector/_lock.py b/ddtrace/profiling/collector/_lock.py index dbd1f5a4c98..43476dd8faa 100644 --- a/ddtrace/profiling/collector/_lock.py +++ b/ddtrace/profiling/collector/_lock.py @@ -4,6 +4,7 @@ import abc import os.path import sys +import types import typing import attr @@ -90,7 +91,8 @@ def __init__( self._self_export_libdd_enabled = export_libdd_enabled frame = sys._getframe(2 if WRAPT_C_EXT else 3) code = frame.f_code - self._self_name = "%s:%d" % (os.path.basename(code.co_filename), frame.f_lineno) + self._self_init_loc = "%s:%d" % (os.path.basename(code.co_filename), frame.f_lineno) + self._self_name: typing.Optional[str] = None def __aenter__(self): return self.__wrapped__.__aenter__() @@ -110,9 +112,13 @@ def _acquire(self, inner_func, *args, **kwargs): end = self._self_acquired_at = compat.monotonic_ns() thread_id, thread_name = _current_thread() task_id, task_name, task_frame = _task.get_task(thread_id) + self._maybe_update_self_name() + lock_name = "%s:%s" % (self._self_init_loc, self._self_name) if self._self_name else self._self_init_loc if task_frame is None: - frame = sys._getframe(1) + # If we can't get the task frame, we use the caller frame. We expect acquire/release or + # __enter__/__exit__ to be on the stack, so we go back 2 frames. 
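The frame-depth comment above is easy to get wrong, so here is a standalone toy (not profiler code) showing the `sys._getframe` arithmetic: depth 0 is the current frame, and each increment walks one caller up the stack.

```python
import sys


def innermost():
    # 0 = innermost, 1 = wrapper_layer, 2 = wrapper_layer's caller --
    # the same offset _acquire/_release use to skip the wrapper frames.
    return sys._getframe(2).f_code.co_name


def wrapper_layer():  # stands in for acquire/release or __enter__/__exit__
    return innermost()


def application_code():
    return wrapper_layer()


assert application_code() == "application_code"
```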
+ frame = sys._getframe(2) else: frame = task_frame @@ -123,7 +129,7 @@ def _acquire(self, inner_func, *args, **kwargs): handle = ddup.SampleHandle() handle.push_monotonic_ns(end) - handle.push_lock_name(self._self_name) + handle.push_lock_name(lock_name) handle.push_acquire(end - start, 1) # AFAICT, capture_pct does not adjust anything here handle.push_threadinfo(thread_id, thread_native_id, thread_name) handle.push_task_id(task_id) @@ -136,7 +142,7 @@ def _acquire(self, inner_func, *args, **kwargs): handle.flush_sample() else: event = self.ACQUIRE_EVENT_CLASS( - lock_name=self._self_name, + lock_name=lock_name, frames=frames, nframes=nframes, thread_id=thread_id, @@ -169,9 +175,13 @@ def _release(self, inner_func, *args, **kwargs): end = compat.monotonic_ns() thread_id, thread_name = _current_thread() task_id, task_name, task_frame = _task.get_task(thread_id) + lock_name = ( + "%s:%s" % (self._self_init_loc, self._self_name) if self._self_name else self._self_init_loc + ) if task_frame is None: - frame = sys._getframe(1) + # See the comments in _acquire + frame = sys._getframe(2) else: frame = task_frame @@ -182,7 +192,7 @@ def _release(self, inner_func, *args, **kwargs): handle = ddup.SampleHandle() handle.push_monotonic_ns(end) - handle.push_lock_name(self._self_name) + handle.push_lock_name(lock_name) handle.push_release( end - self._self_acquired_at, 1 ) # AFAICT, capture_pct does not adjust anything here @@ -199,7 +209,7 @@ def _release(self, inner_func, *args, **kwargs): handle.flush_sample() else: event = self.RELEASE_EVENT_CLASS( - lock_name=self._self_name, + lock_name=lock_name, frames=frames, nframes=nframes, thread_id=thread_id, @@ -233,6 +243,50 @@ def __enter__(self, *args, **kwargs): def __exit__(self, *args, **kwargs): self._release(self.__wrapped__.__exit__, *args, **kwargs) + def _find_self_name(self, var_dict: typing.Dict): + for name, value in var_dict.items(): + if name.startswith("__") or isinstance(value, types.ModuleType): + continue + if value is self: + return name + if config.lock.name_inspect_dir: + for attribute in dir(value): + if not attribute.startswith("__") and getattr(value, attribute) is self: + self._self_name = attribute + return attribute + return None + + # Get lock acquire/release call location and variable name the lock is assigned to + def _maybe_update_self_name(self): + if self._self_name: + return + try: + # We expect the call stack to be like this: + # 0: this + # 1: _acquire/_release + # 2: acquire/release (or __enter__/__exit__) + # 3: caller frame + if config.enable_asserts: + frame = sys._getframe(1) + if frame.f_code.co_name not in {"_acquire", "_release"}: + raise AssertionError("Unexpected frame %s" % frame.f_code.co_name) + frame = sys._getframe(2) + if frame.f_code.co_name not in {"acquire", "release", "__enter__", "__exit__"}: + raise AssertionError("Unexpected frame %s" % frame.f_code.co_name) + frame = sys._getframe(3) + + # First, look at the local variables of the caller frame, and then the global variables + self._self_name = self._find_self_name(frame.f_locals) or self._find_self_name(frame.f_globals) + + if not self._self_name: + self._self_name = "" + LOG.warning( + "Failed to get lock variable name, we only support local/global variables and their attributes." 
+ ) + + except Exception as e: + LOG.warning("Error getting lock acquire/release call location and variable name: %s", e) + class FunctionWrapper(wrapt.FunctionWrapper): # Override the __get__ method: whatever happens, _allocate_lock is always considered by Python like a "static" diff --git a/ddtrace/profiling/collector/stack.pyx b/ddtrace/profiling/collector/stack.pyx index 6164f477191..9a3f1f32838 100644 --- a/ddtrace/profiling/collector/stack.pyx +++ b/ddtrace/profiling/collector/stack.pyx @@ -478,7 +478,7 @@ class StackCollector(collector.PeriodicCollector): _thread_time = attr.ib(init=False, repr=False, eq=False) _last_wall_time = attr.ib(init=False, repr=False, eq=False, type=int) _thread_span_links = attr.ib(default=None, init=False, repr=False, eq=False) - _stack_collector_v2_enabled = attr.ib(type=bool, default=config.stack.v2.enabled) + _stack_collector_v2_enabled = attr.ib(type=bool, default=config.stack.v2_enabled) @max_time_usage_pct.validator def _check_max_time_usage(self, attribute, value): @@ -497,7 +497,7 @@ class StackCollector(collector.PeriodicCollector): if config.export.libdd_enabled: set_use_libdd(True) - # If at the end of things, stack v2 is still enabled, then start the native thread running the v2 sampler + # If stack v2 is enabled, then use the v2 sampler if self._stack_collector_v2_enabled: LOG.debug("Starting the stack v2 sampler") stack_v2.start() diff --git a/ddtrace/profiling/profiler.py b/ddtrace/profiling/profiler.py index acd16b68469..7341976e012 100644 --- a/ddtrace/profiling/profiler.py +++ b/ddtrace/profiling/profiler.py @@ -116,6 +116,7 @@ class _ProfilerInstance(service.Service): agentless = attr.ib(type=bool, default=config.agentless) _memory_collector_enabled = attr.ib(type=bool, default=config.memory.enabled) _stack_collector_enabled = attr.ib(type=bool, default=config.stack.enabled) + _stack_v2_enabled = attr.ib(type=bool, default=config.stack.v2_enabled) _lock_collector_enabled = attr.ib(type=bool, default=config.lock.enabled) enable_code_provenance = attr.ib(type=bool, default=config.code_provenance) endpoint_collection_enabled = attr.ib(type=bool, default=config.endpoint_collection) @@ -128,7 +129,6 @@ class _ProfilerInstance(service.Service): init=False, factory=lambda: os.environ.get("AWS_LAMBDA_FUNCTION_NAME"), type=Optional[str] ) _export_libdd_enabled = attr.ib(type=bool, default=config.export.libdd_enabled) - _export_libdd_required = attr.ib(type=bool, default=config.export.libdd_required) ENDPOINT_TEMPLATE = "https://intake.profile.{}" @@ -171,16 +171,10 @@ def _build_default_exporters(self): if self._lambda_function_name is not None: self.tags.update({"functionname": self._lambda_function_name}) - # Did the user request the libdd collector? Better log it. 
- if self._export_libdd_enabled: - LOG.debug("The libdd collector is enabled") - if self._export_libdd_required: - LOG.debug("The libdd collector is required") - # Build the list of enabled Profiling features and send along as a tag configured_features = [] if self._stack_collector_enabled: - if config.stack.v2.enabled: + if self._stack_v2_enabled: configured_features.append("stack_v2") else: configured_features.append("stack") @@ -195,8 +189,6 @@ def _build_default_exporters(self): configured_features.append("exp_dd") else: configured_features.append("exp_py") - if self._export_libdd_required: - configured_features.append("req_dd") configured_features.append("CAP" + str(config.capture_pct)) configured_features.append("MAXF" + str(config.max_frames)) self.tags.update({"profiler_config": "_".join(configured_features)}) @@ -207,7 +199,6 @@ def _build_default_exporters(self): # If libdd is enabled, then # * If initialization fails, disable the libdd collector and fall back to the legacy exporter - # * If initialization fails and libdd is required, disable everything and return (error) if self._export_libdd_enabled: try: ddup.init( @@ -225,16 +216,11 @@ def _build_default_exporters(self): self._export_libdd_enabled = False config.export.libdd_enabled = False - # If we're here and libdd was required, then there's nothing else to do. We don't have a - # collector. - if self._export_libdd_required: - LOG.error("libdd collector is required but could not be initialized. Disabling profiling.") - config.enabled = False - config.export.libdd_required = False - config.lock.enabled = False - config.memory.enabled = False - config.stack.enabled = False - return [] + # also disable other features that might be enabled + if self._stack_v2_enabled: + LOG.error("Disabling stack_v2 as libdd collector failed to initialize") + self._stack_v2_enabled = False + config.stack.v2_enabled = False # DEV: Import this only if needed to avoid importing protobuf # unnecessarily diff --git a/ddtrace/settings/profiling.py b/ddtrace/settings/profiling.py index 74f02d07f57..a9f31fa9fd5 100644 --- a/ddtrace/settings/profiling.py +++ b/ddtrace/settings/profiling.py @@ -10,8 +10,15 @@ logger = get_logger(__name__) +# Stash the reason why a transitive dependency failed to load; since we try to load things safely in order to guide +# configuration, these errors won't bubble up naturally. All of these components should use the same pattern +# in order to guarantee uniformity. 
+ddup_failure_msg = "" +stack_v2_failure_msg = "" + + def _derive_default_heap_sample_size(heap_config, default_heap_sample_size=1024 * 1024): - # type: (ProfilingConfig.Heap, int) -> int + # type: (ProfilingConfigHeap, int) -> int heap_sample_size = heap_config._sample_size if heap_sample_size is not None: return heap_sample_size @@ -38,18 +45,24 @@ def _derive_default_heap_sample_size(heap_config, default_heap_sample_size=1024 def _check_for_ddup_available(): + global ddup_failure_msg ddup_is_available = False try: from ddtrace.internal.datadog.profiling import ddup ddup_is_available = ddup.is_available + ddup_failure_msg = ddup.failure_msg except Exception: pass # nosec return ddup_is_available def _check_for_stack_v2_available(): + global stack_v2_failure_msg stack_v2_is_available = False + + # stack_v2 will use libdd; in order to prevent two separate collectors from running, it then needs to force + # libdd to be enabled as well; that means it depends on the libdd interface (ddup) if not _check_for_ddup_available(): return False @@ -57,15 +70,14 @@ def _check_for_stack_v2_available(): from ddtrace.internal.datadog.profiling import stack_v2 stack_v2_is_available = stack_v2.is_available + stack_v2_failure_msg = stack_v2.failure_msg except Exception: pass # nosec return stack_v2_is_available -# We don't check for the availability of the ddup module when determining whether libdd is _required_, -# since it's up to the application code to determine what happens in that failure case. def _is_libdd_required(config): - return config.stack.v2.enabled or config._libdd_required + return config.stack.v2_enabled or config.export._libdd_enabled or config.timeline_enabled class ProfilingConfig(En): @@ -188,6 +200,14 @@ class ProfilingConfig(En): help="The tags to apply to uploaded profile. Must be a list in the ``key1:value,key2:value2`` format", ) + enable_asserts = En.v( + bool, + "enable_asserts", + default=False, + help_type="Boolean", + help="Whether to enable debug assertions in the profiler code", + ) + class Stack(En): __item__ = __prefix__ = "stack" @@ -212,89 +232,125 @@ class V2(En): enabled = En.d(bool, lambda c: _check_for_stack_v2_available() and c._enabled) - class Lock(En): - __item__ = __prefix__ = "lock" - enabled = En.v( - bool, - "enabled", - default=True, - help_type="Boolean", - help="Whether to enable the lock profiler", - ) +class ProfilingConfigStack(En): + __item__ = __prefix__ = "stack" - class Memory(En): - __item__ = __prefix__ = "memory" + enabled = En.v( + bool, + "enabled", + default=True, + help_type="Boolean", + help="Whether to enable the stack profiler", + ) - enabled = En.v( - bool, - "enabled", - default=True, - help_type="Boolean", - help="Whether to enable the memory profiler", - ) + _v2_enabled = En.v( + bool, + "v2_enabled", + default=False, + help_type="Boolean", + help="Whether to enable the v2 stack profiler. 
Also enables the libdatadog collector.", + ) - events_buffer = En.v( - int, - "events_buffer", - default=16, - help_type="Integer", - help="", - ) + # V2 can't be enabled if stack collection is disabled or if pre-requisites are not met + v2_enabled = En.d(bool, lambda c: _check_for_stack_v2_available() and c._v2_enabled and c.enabled) - class Heap(En): - __item__ = __prefix__ = "heap" - enabled = En.v( - bool, - "enabled", - default=True, - help_type="Boolean", - help="Whether to enable the heap memory profiler", - ) +class ProfilingConfigLock(En): + __item__ = __prefix__ = "lock" - _sample_size = En.v( - t.Optional[int], - "sample_size", - default=None, - help_type="Integer", - help="", - ) - sample_size = En.d(int, _derive_default_heap_sample_size) + enabled = En.v( + bool, + "enabled", + default=True, + help_type="Boolean", + help="Whether to enable the lock profiler", + ) - class Export(En): - __item__ = __prefix__ = "export" + name_inspect_dir = En.v( + bool, + "name_inspect_dir", + default=True, + help_type="Boolean", + help="Whether to inspect the ``dir()`` of local and global variables to find the name of the lock. " + "With this enabled, the profiler finds the name of locks that are attributes of an object.", + ) - _libdd_required = En.v( - bool, - "libdd_required", - default=False, - help_type="Boolean", - help="Requires the native exporter to be enabled", - ) - libdd_required = En.d( - bool, - _is_libdd_required, - ) +class ProfilingConfigMemory(En): + __item__ = __prefix__ = "memory" - _libdd_enabled = En.v( - bool, - "libdd_enabled", - default=False, - help_type="Boolean", - help="Enables collection and export using a native exporter. Can fallback to the pure-Python exporter.", - ) + enabled = En.v( + bool, + "enabled", + default=True, + help_type="Boolean", + help="Whether to enable the memory profiler", + ) - libdd_enabled = En.d( - bool, lambda c: (_is_libdd_required(c) or c._libdd_enabled) and _check_for_ddup_available() - ) + events_buffer = En.v( + int, + "events_buffer", + default=16, + help_type="Integer", + help="", + ) + + +class ProfilingConfigHeap(En): + __item__ = __prefix__ = "heap" + + enabled = En.v( + bool, + "enabled", + default=True, + help_type="Boolean", + help="Whether to enable the heap memory profiler", + ) + + _sample_size = En.v( + t.Optional[int], + "sample_size", + default=None, + help_type="Integer", + help="", + ) + sample_size = En.d(int, _derive_default_heap_sample_size) + + +class ProfilingConfigExport(En): + __item__ = __prefix__ = "export" + + _libdd_enabled = En.v( + bool, + "libdd_enabled", + default=False, + help_type="Boolean", + help="Enables collection and export using a native exporter. Can fallback to the pure-Python exporter.", + ) - Export.include(Stack, namespace="stack") +# Include all the sub-configs +ProfilingConfig.include(ProfilingConfigStack, namespace="stack") +ProfilingConfig.include(ProfilingConfigLock, namespace="lock") +ProfilingConfig.include(ProfilingConfigMemory, namespace="memory") +ProfilingConfig.include(ProfilingConfigHeap, namespace="heap") +ProfilingConfig.include(ProfilingConfigExport, namespace="export") config = ProfilingConfig() -if config.export.libdd_required and not config.export.libdd_enabled: - logger.warning("The native exporter is required, but not enabled. 
Disabling profiling.")
-    config.enabled = False
+# Force the enablement of libdd if the user requested a feature which requires it; otherwise the user has to manage
+# configuration too deliberately and we'll need to change the API too much over time.
+config.export.libdd_enabled = _is_libdd_required(config)
+
+# Certain features depend on libdd being available. If it isn't for some reason, those features cannot be enabled.
+if config.stack.v2_enabled and not config.export.libdd_enabled:
+    msg = ddup_failure_msg or "libdd not available"
+    logger.warning("The v2 stack profiler cannot be used (%s)", msg)
+    config.stack.v2_enabled = False
+
+# Loading stack_v2 can fail for similar reasons
+if config.stack.v2_enabled and not _check_for_stack_v2_available():
+    msg = stack_v2_failure_msg or "stack_v2 not available"
+    logger.warning("The v2 stack profiler cannot be used (%s)", msg)
+    config.stack.v2_enabled = False
diff --git a/releasenotes/notes/asm-avoid-idna-d724dce73afafa93.yaml
new file mode 100644
index 00000000000..58f63e5d8df
--- /dev/null
+++ b/releasenotes/notes/asm-avoid-idna-d724dce73afafa93.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Code Security: add encodings.idna to the IAST patching denylist to avoid problems with gevent.
diff --git a/releasenotes/notes/ci_visibility-fix-update_eu_telemetry_url-0642a6f665c75a0f.yaml
new file mode 100644
index 00000000000..d8a898e9806
--- /dev/null
+++ b/releasenotes/notes/ci_visibility-fix-update_eu_telemetry_url-0642a6f665c75a0f.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    CI Visibility: fixes an incorrect URL for telemetry intake in the EU that was causing missing telemetry data and SSL error log messages.
diff --git a/releasenotes/notes/ci_visibility-fix_properly_strip_dotgit_from_repo_url-523a908075aea559.yaml
new file mode 100644
index 00000000000..10cb457166d
--- /dev/null
+++ b/releasenotes/notes/ci_visibility-fix_properly_strip_dotgit_from_repo_url-523a908075aea559.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    CI Visibility: Fixes a bug where ``.git`` was incorrectly being stripped from repository URLs when extracting service names,
+    resulting in ``g``, ``i``, or ``t`` being removed (e.g. ``test-environment.git`` incorrectly becoming ``test-environmen``)
diff --git a/releasenotes/notes/fix-django-stream-body-exhausted-by-ddtrace-eb25702730c20e5e.yaml
new file mode 100644
index 00000000000..91964e991c7
--- /dev/null
+++ b/releasenotes/notes/fix-django-stream-body-exhausted-by-ddtrace-eb25702730c20e5e.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    tracing(django): This fix resolves a bug where ddtrace was exhausting a Django stream response before returning it to the user.
diff --git a/releasenotes/notes/fix-rate-limiter-borrow-263dedb6e46ba74e.yaml
new file mode 100644
index 00000000000..aaab3b69d3c
--- /dev/null
+++ b/releasenotes/notes/fix-rate-limiter-borrow-263dedb6e46ba74e.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    internal: Fix for ``Already mutably borrowed`` error when the rate limiter is accessed across threads.
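The two rate-limiter notes above and below refer to the Rust token-bucket limiter that this diff deletes from src/core/rate_limiter.rs further down. As a rough illustration of the logic being reverted to pure Python, here is a minimal sketch of a mutex-guarded token bucket; the class name and API are hypothetical, not the actual ddtrace rate limiter:

import threading
import time


class TokenBucketRateLimiter(object):
    """Allow up to `rate_limit` acquisitions per `time_window` nanoseconds."""

    def __init__(self, rate_limit, time_window=1e9):
        self.rate_limit = rate_limit
        self.time_window = float(time_window)
        self.tokens = float(rate_limit)
        self.max_tokens = float(rate_limit)
        self.last_update_ns = time.monotonic_ns()
        self._lock = threading.Lock()  # a single mutex serializes all state updates

    def is_allowed(self):
        # A rate limit of 0 always disallows; a negative limit always allows.
        if self.rate_limit == 0:
            return False
        if self.rate_limit < 0:
            return True
        now = time.monotonic_ns()
        with self._lock:
            # Refill tokens in proportion to the time elapsed since the last call,
            # clamping at the bucket capacity.
            elapsed = max(now - self.last_update_ns, 0) / self.time_window
            self.tokens = min(self.tokens + elapsed * self.max_tokens, self.max_tokens)
            self.last_update_ns = now
            if self.tokens >= 1.0:
                self.tokens -= 1.0
                return True
            return False


limiter = TokenBucketRateLimiter(rate_limit=100)  # at most 100 per second
assert limiter.is_allowed() is True

Holding one mutex around all state avoids the shared-mutable-state hazard behind the ``Already mutably borrowed`` error described in these notes.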
diff --git a/releasenotes/notes/fix-revert-rust-rate-limiter-e61ca589aa24105b.yaml
new file mode 100644
index 00000000000..93585c83bda
--- /dev/null
+++ b/releasenotes/notes/fix-revert-rust-rate-limiter-e61ca589aa24105b.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    internal: Fix ``Already mutably borrowed`` error by reverting to the pure-Python rate limiter.
diff --git a/releasenotes/notes/fix-ssi-permissions-647693af3d5ce49d.yaml
new file mode 100644
index 00000000000..ff010664a2b
--- /dev/null
+++ b/releasenotes/notes/fix-ssi-permissions-647693af3d5ce49d.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    SSI: This fixes incorrect file permissions on lib-injection images for 2.10.x releases.
+
diff --git a/releasenotes/notes/iast-new-pyobject-check-6aecf9c4f22e2ae9.yaml
new file mode 100644
index 00000000000..ff38ff60deb
--- /dev/null
+++ b/releasenotes/notes/iast-new-pyobject-check-6aecf9c4f22e2ae9.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Code Security: add null pointer checks when creating new object IDs.
diff --git a/releasenotes/notes/openai-async-timeout-errors-f9ccc1adbe4ed14e.yaml
new file mode 100644
index 00000000000..17d85525559
--- /dev/null
+++ b/releasenotes/notes/openai-async-timeout-errors-f9ccc1adbe4ed14e.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    openai: Fixes a bug where `asyncio.TimeoutError`s were not being propagated correctly from canceled OpenAI API requests.
\ No newline at end of file
diff --git a/releasenotes/notes/openai-model-tag-2482b3d5b2905db9.yaml
new file mode 100644
index 00000000000..deb2be5ac17
--- /dev/null
+++ b/releasenotes/notes/openai-model-tag-2482b3d5b2905db9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    openai: This introduces a 'model' tag for openai integration metrics for consistency with the OpenAI SaaS Integration. It has the same value as `openai.request.model`.
\ No newline at end of file
diff --git a/releasenotes/notes/otel-ensure-sampling-decision-is-made-40ab760eada20b20.yaml
new file mode 100644
index 00000000000..5c286f8a06e
--- /dev/null
+++ b/releasenotes/notes/otel-ensure-sampling-decision-is-made-40ab760eada20b20.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    opentelemetry: Resolves an edge case where distributed tracing headers could be generated before a sampling decision is made,
+    resulting in dropped spans in downstream services.
diff --git a/releasenotes/notes/profiling-fix-endpoint-v2-a6ca2ebbc9701fe5.yaml
new file mode 100644
index 00000000000..9cc3a342b39
--- /dev/null
+++ b/releasenotes/notes/profiling-fix-endpoint-v2-a6ca2ebbc9701fe5.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    profiling: Fixes endpoint profiling for stack v2, that is, when
+    ``DD_PROFILING_STACK_V2_ENABLED`` is set.
+ diff --git a/releasenotes/notes/profiling-libdd-required-46f56ee4027ef2af.yaml b/releasenotes/notes/profiling-libdd-required-46f56ee4027ef2af.yaml new file mode 100644 index 00000000000..b6d8719b771 --- /dev/null +++ b/releasenotes/notes/profiling-libdd-required-46f56ee4027ef2af.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + profiling: turns on the new native exporter when DD_PROFILING_TIMELINE_ENABLED + is set. + diff --git a/releasenotes/notes/profiling-timeline-lock-init-loc-and-frames-39a43f924bde88d2.yaml b/releasenotes/notes/profiling-timeline-lock-init-loc-and-frames-39a43f924bde88d2.yaml new file mode 100644 index 00000000000..2edfc627289 --- /dev/null +++ b/releasenotes/notes/profiling-timeline-lock-init-loc-and-frames-39a43f924bde88d2.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + profiling: show lock init location in Lock Name and hide profiler internal + frames from Stack Frame in Timeline Details tab. diff --git a/riotfile.py b/riotfile.py index 1ccf4d7514d..6fde633926e 100644 --- a/riotfile.py +++ b/riotfile.py @@ -800,6 +800,8 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): "python-memcached": latest, "pytest-randomly": latest, "django-q": latest, + "spyne": latest, + "zeep": latest, }, env={ "DD_CIVISIBILITY_ITR_ENABLED": "0", @@ -2677,6 +2679,9 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): name="profile", # NB riot commands that use this Venv must include --pass-env to work properly command="python -m tests.profiling.run pytest -v --no-cov --capture=no --benchmark-disable {cmdargs} tests/profiling", # noqa: E501 + env={ + "DD_PROFILING_ENABLE_ASSERTS": "1", + }, pkgs={ "gunicorn": latest, # diff --git a/src/core/lib.rs b/src/core/lib.rs index ceee6a484a8..0016cdfece6 100644 --- a/src/core/lib.rs +++ b/src/core/lib.rs @@ -1,9 +1,6 @@ -mod rate_limiter; - use pyo3::prelude::*; #[pymodule] -fn _core(m: &Bound<'_, PyModule>) -> PyResult<()> { - m.add_class::()?; +fn _core(_: &Bound<'_, PyModule>) -> PyResult<()> { Ok(()) } diff --git a/src/core/rate_limiter.rs b/src/core/rate_limiter.rs deleted file mode 100644 index 1d96caf1118..00000000000 --- a/src/core/rate_limiter.rs +++ /dev/null @@ -1,179 +0,0 @@ -use pyo3::prelude::*; -use std::sync::Mutex; - -// Token bucket rate limiter -struct RateLimiter { - rate_limit: i32, - time_window: f64, - tokens: f64, - max_tokens: f64, - last_update_ns: f64, - current_window_ns: f64, - tokens_allowed: i32, - tokens_total: i32, - prev_window_rate: Option, - _lock: Mutex<()>, -} - -impl RateLimiter { - pub fn new(rate_limit: i32, time_window: f64) -> RateLimiter { - RateLimiter { - rate_limit, - time_window, - tokens: rate_limit as f64, - max_tokens: rate_limit as f64, - last_update_ns: 0.0, - current_window_ns: 0.0, - tokens_allowed: 0, - tokens_total: 0, - prev_window_rate: None, - _lock: Mutex::new(()), - } - } - - pub fn _is_allowed(&mut self, timestamp_ns: f64) -> bool { - let mut _lock = self._lock.lock().unwrap(); - - let allowed = (|| -> bool { - // Rate limit of 0 is always disallowed. Negative rate limits are always allowed. - match self.rate_limit { - 0 => return false, - _ if self.rate_limit < 0 => return true, - _ => {} - } - - if self.tokens < self.max_tokens { - let mut elapsed: f64 = (timestamp_ns - self.last_update_ns) / self.time_window; - if elapsed < 0.0 { - // Note - this should never happen, but if it does, we should reset the elapsed time to avoid negative tokens. 
- elapsed = 0.0 - } - self.tokens += elapsed * self.max_tokens; - if self.tokens > self.max_tokens { - self.tokens = self.max_tokens; - } - } - - self.last_update_ns = timestamp_ns; - - if self.tokens >= 1.0 { - self.tokens -= 1.0; - return true; - } - - false - })(); - - // If we are in a new window, update the window rate - if self.current_window_ns == 0.0 { - self.current_window_ns = timestamp_ns; - } else if timestamp_ns - self.current_window_ns >= self.time_window { - self.prev_window_rate = Some(self.current_window_rate()); - self.current_window_ns = timestamp_ns; - self.tokens_allowed = 0; - self.tokens_total = 0; - } - - // Update the token counts - self.tokens_total += 1; - if allowed { - self.tokens_allowed += 1; - } - - allowed - } - - pub fn effective_rate(&self) -> f64 { - let current_rate: f64 = self.current_window_rate(); - - if self.prev_window_rate.is_none() { - return current_rate; - } - - (current_rate + self.prev_window_rate.unwrap()) / 2.0 - } - - fn current_window_rate(&self) -> f64 { - // If no tokens have been seen then return 1.0 - // DEV: This is to avoid a division by zero error - if self.tokens_total == 0 { - return 1.0; - } - - self.tokens_allowed as f64 / self.tokens_total as f64 - } -} - -#[pyclass(name = "RateLimiter", subclass, module = "ddtrace.internal.core._core")] -pub struct RateLimiterPy { - rate_limiter: RateLimiter, -} - -#[pymethods] -impl RateLimiterPy { - #[new] - fn new(rate_limit: i32, time_window: Option) -> Self { - RateLimiterPy { - rate_limiter: RateLimiter::new(rate_limit, time_window.unwrap_or(1e9)), - } - } - - pub fn _is_allowed(&mut self, py: Python<'_>, timestamp_ns: f64) -> bool { - py.allow_threads(|| self.rate_limiter._is_allowed(timestamp_ns)) - } - - #[getter] - pub fn effective_rate(&self) -> f64 { - self.rate_limiter.effective_rate() - } - - #[getter] - pub fn current_window_rate(&self) -> f64 { - self.rate_limiter.current_window_rate() - } - - #[getter] - pub fn rate_limit(&self) -> i32 { - self.rate_limiter.rate_limit - } - - #[getter] - pub fn time_window(&self) -> f64 { - self.rate_limiter.time_window - } - - #[getter] - pub fn tokens(&self) -> f64 { - self.rate_limiter.tokens - } - - #[getter] - pub fn max_tokens(&self) -> f64 { - self.rate_limiter.max_tokens - } - - #[getter] - pub fn last_update_ns(&self) -> f64 { - self.rate_limiter.last_update_ns - } - - #[getter] - pub fn current_window_ns(&self) -> f64 { - self.rate_limiter.current_window_ns - } - - #[getter] - pub fn prev_window_rate(&self) -> Option { - self.rate_limiter.prev_window_rate - } - - #[getter] - pub fn tokens_allowed(&self) -> i32 { - self.rate_limiter.tokens_allowed - } - - #[getter] - pub fn tokens_total(&self) -> i32 { - self.rate_limiter.tokens_total - } -} diff --git a/tests/appsec/iast_memcheck/test_iast_mem_check.py b/tests/appsec/iast_memcheck/test_iast_mem_check.py index 04e2d8d3694..20a40917eaa 100644 --- a/tests/appsec/iast_memcheck/test_iast_mem_check.py +++ b/tests/appsec/iast_memcheck/test_iast_mem_check.py @@ -102,7 +102,9 @@ def test_propagation_memory_check(origin1, origin2, iast_span_defaults): _initializer_size = initializer_size() assert _initializer_size > 0 - assert _num_objects_tainted == num_objects_tainted() + # Some tainted pyobject is freed, and Python may reuse the memory address + # hence the number of tainted objects may be the same or less + assert _num_objects_tainted in (num_objects_tainted(), num_objects_tainted() - 1) assert _active_map_addreses_size == active_map_addreses_size() assert _initializer_size == 
initializer_size() reset_context() diff --git a/tests/ci_visibility/test_ci_visibility.py b/tests/ci_visibility/test_ci_visibility.py index 221a8d7be46..a02d9da934e 100644 --- a/tests/ci_visibility/test_ci_visibility.py +++ b/tests/ci_visibility/test_ci_visibility.py @@ -292,6 +292,16 @@ def test_ci_visibility_service_disable(): ("git+git://github.com/org/repo-name.git", "repo-name"), ("git+ssh://github.com/org/repo-name.git", "repo-name"), ("git+https://github.com/org/repo-name.git", "repo-name"), + ("https://github.com/fastapi/fastapi.git", "fastapi"), + ("git@github.com:fastapi/fastapi.git", "fastapi"), + ("git@github.com:fastapi/fastapi.gitttttt", "fastapi.gitttttt"), + ("git@github.com:fastapi/fastapiiiititititi.git", "fastapiiiititititi"), + ("https://github.com/fastapi/fastapitttttt.git", "fastapitttttt"), + ("this is definitely not a valid git repo URL", "this is definitely not a valid git repo URL"), + ("git@github.com:fastapi/FastAPI.GiT", "FastAPI"), + ("git+https://github.com/org/REPO-NAME.GIT", "REPO-NAME"), + ("https://github.com/DataDog/DD-TRACE-py", "DD-TRACE-py"), + ("https://github.com/DataDog/dd-trace-py.GIT", "dd-trace-py"), ], ) def test_repository_name_extracted(repository_url, repository_name): diff --git a/tests/contrib/django/soap/__init__.py b/tests/contrib/django/soap/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/django/soap/apps.py b/tests/contrib/django/soap/apps.py new file mode 100644 index 00000000000..05f7bc90b2c --- /dev/null +++ b/tests/contrib/django/soap/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class SoapConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "soap" diff --git a/tests/contrib/django/soap/models.py b/tests/contrib/django/soap/models.py new file mode 100644 index 00000000000..9df7e1f2288 --- /dev/null +++ b/tests/contrib/django/soap/models.py @@ -0,0 +1,51 @@ +from spyne import Boolean +from spyne import ComplexModel +from spyne import ComplexModelBase +from spyne import String +from spyne import Unicode +from spyne.util.odict import odict + + +class OrderedModel(object): + """ + Ugly hack to create an ordered model in Spyne, but there's no other way. 
+    See: https://mail.python.org/pipermail/soap/2013-June/001113.html
+    """
+
+    def __init__(self):
+        self.result = odict()
+
+    def fields(self):
+        """This method should be overwritten."""
+        raise NotImplementedError("Overwrite the OrderedModel.fields() method.")
+
+    def model_names(self):
+        """This method should be overwritten."""
+        raise NotImplementedError("Overwrite the OrderedModel.model_names() method.")
+
+    def produce(self, type_name, prefix=""):
+        """Produce the actual model."""
+        for field in self.fields():
+            if isinstance(field[1], OrderedModel):
+                self.result[field[0]] = field[1].produce(field[2])
+            else:
+                self.result[field[0]] = field[1]
+        return ComplexModelBase.produce(prefix, type_name, self.result)
+
+
+class LeaveStatusModel(OrderedModel):
+    def fields(self):
+        return [
+            ("LeaveID", String(), "leave_id"),
+            ("Description", String(), "description"),
+        ]
+
+    def model_names(self):
+        return ("leave", "LeaveStatus")
+
+
+class ResponseModel(ComplexModel):
+    __namespace__ = ""
+
+    success = Boolean
+    errorText = Unicode
diff --git a/tests/contrib/django/soap/services.py
new file mode 100644
index 00000000000..9b767e296dc
--- /dev/null
+++ b/tests/contrib/django/soap/services.py
@@ -0,0 +1,36 @@
+import logging
+
+from django.views.decorators.csrf import csrf_exempt
+from lxml import etree
+from spyne import Application  # noqa
+from spyne import ServiceBase  # noqa
+from spyne import rpc  # noqa
+from spyne.protocol.soap import Soap11  # noqa
+from spyne.server.django import DjangoApplication  # noqa
+
+from .models import LeaveStatusModel
+from .models import ResponseModel
+
+
+logger = logging.getLogger(__name__)
+
+
+class LeaveStatusService(ServiceBase):
+    @rpc(LeaveStatusModel().produce("leave_status", ""), _body_style="bare", _returns=ResponseModel)
+    def EmployeeLeaveStatus(self, leave_status):
+        in_body_doc = etree.tostring(self.in_body_doc)
+        logger.info("Leave service called with: %s", in_body_doc)
+        logger.info("Parsed Leave Status: %s", leave_status)
+        return ResponseModel(success=True, errorText=in_body_doc)
+
+
+leave_status_app = Application(
+    [
+        LeaveStatusService,
+    ],
+    tns="http://kabisa.nl/soap/reproduction",
+    in_protocol=Soap11(validator="lxml"),
+    out_protocol=Soap11(),
+)
+
+leave_status_service = csrf_exempt(DjangoApplication(leave_status_app))
diff --git a/tests/contrib/django/test_django_wsgi.py b/tests/contrib/django/test_django_wsgi.py
index d2db3dc8def..edc065377d8 100644
--- a/tests/contrib/django/test_django_wsgi.py
+++ b/tests/contrib/django/test_django_wsgi.py
@@ -11,6 +11,8 @@
 import pytest
 
 from ddtrace.contrib.wsgi import DDWSGIMiddleware
+from ddtrace.internal.compat import PYTHON_VERSION_INFO
+from tests.contrib.django.utils import make_soap_request
 from tests.webclient import Client
 
 
@@ -34,18 +36,27 @@ def handler(_):
 
 urlpatterns = [path("", handler)]
 
+
+# This import fails on Python 3.12
+if PYTHON_VERSION_INFO < (3, 12):
+    from tests.contrib.django.soap.services import leave_status_service
+
+    urlpatterns.append(path("soap/", leave_status_service, name="soap_account"))
+
+
 # it would be better to check for app_is_iterator programmatically, but Django WSGI apps behave like
 # iterators for the purpose of DDWSGIMiddleware despite not having both "__next__" and "__iter__" methods
 app = DDWSGIMiddleware(get_wsgi_application(), app_is_iterator=True)
 
 
-@pytest.mark.skipif(django.VERSION < (3, 0, 0), reason="Older Django versions don't work with this use of django-admin")
-def
test_django_app_receives_request_finished_signal_when_app_is_ddwsgimiddleware(): +@pytest.fixture() +def wsgi_app(): env = os.environ.copy() env.update( { "PYTHONPATH": os.path.dirname(os.path.abspath(__file__)) + ":" + env["PYTHONPATH"], "DJANGO_SETTINGS_MODULE": "test_django_wsgi", + "DD_TRACE_ENABLED": "true", } ) cmd = ["django-admin", "runserver", "--noreload", str(SERVER_PORT)] @@ -57,6 +68,18 @@ def test_django_app_receives_request_finished_signal_when_app_is_ddwsgimiddlewar env=env, ) + yield proc + + try: + proc.terminate() + proc.wait(timeout=5) # Wait up to 5 seconds for the process to terminate + except subprocess.TimeoutExpired: + proc.kill() + proc.wait() + + +@pytest.mark.skipif(django.VERSION < (3, 0, 0), reason="Older Django versions don't work with this use of django-admin") +def test_django_app_receives_request_finished_signal_when_app_is_ddwsgimiddleware(wsgi_app): client = Client("http://localhost:%d" % SERVER_PORT) client.wait() output = "" @@ -64,8 +87,19 @@ def test_django_app_receives_request_finished_signal_when_app_is_ddwsgimiddlewar assert client.get("/").status_code == 200 finally: try: - _, output = proc.communicate(timeout=1) + _, output = wsgi_app.communicate(timeout=1) except subprocess.TimeoutExpired: - proc.kill() - _, output = proc.communicate() + wsgi_app.kill() + _, output = wsgi_app.communicate() assert SENTINEL_LOG in str(output) + + +@pytest.mark.skipif(PYTHON_VERSION_INFO >= (3, 12), reason="A Spyne import fails when using Python 3.12") +def test_django_wsgi_soap_app_works(wsgi_app): + client = Client("http://localhost:%d" % SERVER_PORT) + client.wait() + + url = "http://localhost:%d" % SERVER_PORT + "/soap/?wsdl" + response = make_soap_request(url) + + assert response["success"] is True diff --git a/tests/contrib/django/utils.py b/tests/contrib/django/utils.py new file mode 100644 index 00000000000..f69140a0456 --- /dev/null +++ b/tests/contrib/django/utils.py @@ -0,0 +1,14 @@ +from zeep import Client +from zeep.transports import Transport + + +def make_soap_request(url): + client = Client(wsdl=url, transport=Transport()) + + # Call the SOAP service + response = client.service.EmployeeLeaveStatus(LeaveID="124", Description="Annual leave") + # Print the response + print(f"Success: {response.success}") + print(f"ErrorText: {response.errorText}") + + return response diff --git a/tests/contrib/openai/test_openai_v0.py b/tests/contrib/openai/test_openai_v0.py index db15db897bc..631e80f67d0 100644 --- a/tests/contrib/openai/test_openai_v0.py +++ b/tests/contrib/openai/test_openai_v0.py @@ -151,6 +151,7 @@ def test_completion( "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -220,6 +221,7 @@ async def test_acompletion( "env:", "service:", "openai.request.model:curie", + "model:curie", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -321,6 +323,7 @@ def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mo "env:staging", "version:1234", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.name:datadog-4", @@ -1285,6 +1288,7 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, mock_tracer): "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1325,6 +1329,7 @@ 
async def test_completion_async_stream(openai, openai_vcr, mock_metrics, mock_tr "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1372,6 +1377,7 @@ def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_trace "env:", "service:", "openai.request.model:gpt-3.5-turbo", + "model:gpt-3.5-turbo", "openai.request.endpoint:/v1/chat/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1422,6 +1428,7 @@ async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, sn "env:", "service:", "openai.request.model:gpt-3.5-turbo", + "model:gpt-3.5-turbo", "openai.request.endpoint:/v1/chat/completions", "openai.request.method:POST", "openai.organization.id:", diff --git a/tests/contrib/openai/test_openai_v1.py b/tests/contrib/openai/test_openai_v1.py index e14d54bca8d..0a57e52a7cc 100644 --- a/tests/contrib/openai/test_openai_v1.py +++ b/tests/contrib/openai/test_openai_v1.py @@ -168,6 +168,7 @@ def test_completion( "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -238,6 +239,7 @@ async def test_acompletion( "env:", "service:", "openai.request.model:curie", + "model:curie", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -345,6 +347,7 @@ def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mo "env:staging", "version:1234", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.name:datadog-4", @@ -952,6 +955,7 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, mock_tracer): "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -991,6 +995,7 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, mock_tr "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1033,6 +1038,7 @@ def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, moc "env:", "service:", "openai.request.model:ada", + "model:ada", "openai.request.endpoint:/v1/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1079,6 +1085,7 @@ def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_trace "env:", "service:", "openai.request.model:gpt-3.5-turbo", + "model:gpt-3.5-turbo", "openai.request.endpoint:/v1/chat/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1128,6 +1135,7 @@ async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, sn "env:", "service:", "openai.request.model:gpt-3.5-turbo", + "model:gpt-3.5-turbo", "openai.request.endpoint:/v1/chat/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1184,6 +1192,7 @@ async def test_chat_completion_async_stream_context_manager(openai, openai_vcr, "env:", "service:", "openai.request.model:gpt-3.5-turbo", + "model:gpt-3.5-turbo", "openai.request.endpoint:/v1/chat/completions", "openai.request.method:POST", "openai.organization.id:", @@ -1682,3 +1691,43 @@ def test_integration_service_name(openai_api_key, ddtrace_run_python_code_in_sub assert status == 0, err assert out == 
b"" assert err == b"" + + +async def test_openai_asyncio_cancellation(openai): + import asyncio + + import httpx + + class DelayedTransport(httpx.AsyncBaseTransport): + def __init__(self, delay: float): + self.delay = delay + self._transport = httpx.AsyncHTTPTransport() + + async def handle_async_request(self, request: httpx.Request) -> httpx.Response: + # Introduce a delay before making the actual request + await asyncio.sleep(self.delay) + return await self._transport.handle_async_request(request) + + client = openai.AsyncOpenAI(http_client=httpx.AsyncClient(transport=DelayedTransport(delay=10))) + asyncio_timeout = False + + try: + await asyncio.wait_for( + client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "Write a Python program that writes a Python program for a given task.", + }, + ], + user="ddtrace-test", + ), + timeout=1, + ) + except asyncio.TimeoutError: + asyncio_timeout = True + except Exception as e: + assert False, f"Unexpected exception: {e}" + + assert asyncio_timeout, "Expected asyncio.TimeoutError" diff --git a/tests/opentelemetry/test_trace.py b/tests/opentelemetry/test_trace.py index 3e4dba17aaf..6015fb4c299 100644 --- a/tests/opentelemetry/test_trace.py +++ b/tests/opentelemetry/test_trace.py @@ -1,5 +1,7 @@ import mock import opentelemetry +from opentelemetry.trace import set_span_in_context +from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator import opentelemetry.version import pytest @@ -50,7 +52,7 @@ def test_otel_start_span_without_default_args(oteltracer): root = oteltracer.start_span("root-span") otel_span = oteltracer.start_span( "test-start-span", - context=opentelemetry.trace.set_span_in_context(root), + context=set_span_in_context(root), kind=opentelemetry.trace.SpanKind.CLIENT, attributes={"start_span_tag": "start_span_val"}, links=None, @@ -117,7 +119,7 @@ def test_otel_start_current_span_without_default_args(oteltracer): with oteltracer.start_as_current_span("root-span") as root: with oteltracer.start_as_current_span( "test-start-current-span-no-defualts", - context=opentelemetry.trace.set_span_in_context(root), + context=set_span_in_context(root), kind=opentelemetry.trace.SpanKind.SERVER, attributes={"start_current_span_tag": "start_cspan_val"}, links=[], @@ -138,6 +140,50 @@ def test_otel_start_current_span_without_default_args(oteltracer): otel_span.end() +def test_otel_get_span_context_sets_sampling_decision(oteltracer): + with oteltracer.start_span("otel-server") as otelspan: + # Sampling priority is not set on span creation + assert otelspan._ddspan.context.sampling_priority is None + # Ensure the sampling priority is always consistent with traceflags + span_context = otelspan.get_span_context() + # Sampling priority is evaluated when the SpanContext is first accessed + sp = otelspan._ddspan.context.sampling_priority + assert sp is not None + if sp > 0: + assert span_context.trace_flags == 1 + else: + assert span_context.trace_flags == 0 + # Ensure the sampling priority is always consistent + for _ in range(1000): + otelspan.get_span_context() + assert otelspan._ddspan.context.sampling_priority == sp + + +def test_distributed_trace_inject(oteltracer): # noqa:F811 + with oteltracer.start_as_current_span("test-otel-distributed-trace") as span: + headers = {} + TraceContextTextMapPropagator().inject(headers, set_span_in_context(span)) + sp = span.get_span_context() + assert headers["traceparent"] == f"00-{sp.trace_id:032x}-{sp.span_id:016x}-{sp.trace_flags:02x}" 
+ assert headers["tracestate"] == sp.trace_state.to_header() + + +def test_distributed_trace_extract(oteltracer): # noqa:F811 + headers = { + "traceparent": "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01", + "tracestate": "congo=t61rcWkgMzE,dd=s:2", + } + context = TraceContextTextMapPropagator().extract(headers) + with oteltracer.start_as_current_span("test-otel-distributed-trace", context=context) as span: + sp = span.get_span_context() + assert sp.trace_id == int("0af7651916cd43dd8448eb211c80319c", 16) + assert span._ddspan.parent_id == int("b7ad6b7169203331", 16) + assert sp.trace_flags == 1 + assert sp.trace_state.get("congo") == "t61rcWkgMzE" + assert "s:2" in sp.trace_state.get("dd") + assert sp.is_remote is False + + @flaky(1717428664) @pytest.mark.parametrize( "flask_wsgi_application,flask_env_arg,flask_port,flask_command", @@ -164,10 +210,12 @@ def test_otel_start_current_span_without_default_args(oteltracer): "with_opentelemetry_instrument", ], ) -@pytest.mark.snapshot(ignores=["metrics.net.peer.port", "meta.traceparent", "meta.flask.version"]) +@pytest.mark.snapshot(ignores=["metrics.net.peer.port", "meta.traceparent", "meta.tracestate", "meta.flask.version"]) def test_distributed_trace_with_flask_app(flask_client, oteltracer): # noqa:F811 - with oteltracer.start_as_current_span("test-otel-distributed-trace"): - resp = flask_client.get("/otel") + with oteltracer.start_as_current_span("test-otel-distributed-trace") as span: + headers = {} + TraceContextTextMapPropagator().inject(headers, set_span_in_context(span)) + resp = flask_client.get("/otel", headers=headers) assert resp.text == "otel" assert resp.status_code == 200 diff --git a/tests/profiling/collector/global_locks.py b/tests/profiling/collector/global_locks.py new file mode 100644 index 00000000000..7fec29caba9 --- /dev/null +++ b/tests/profiling/collector/global_locks.py @@ -0,0 +1,22 @@ +import threading + + +global_lock = threading.Lock() + + +def foo(): + global global_lock + with global_lock: + pass + + +class Bar: + def __init__(self): + self.bar_lock = threading.Lock() + + def bar(self): + with self.bar_lock: + pass + + +bar_instance = Bar() diff --git a/tests/profiling/collector/test_asyncio.py b/tests/profiling/collector/test_asyncio.py index d7ed9ab9e86..4488a79728c 100644 --- a/tests/profiling/collector/test_asyncio.py +++ b/tests/profiling/collector/test_asyncio.py @@ -18,7 +18,7 @@ async def test_lock_acquire_events(): assert len(r.events[collector_asyncio.AsyncioLockAcquireEvent]) == 1 assert len(r.events[collector_asyncio.AsyncioLockReleaseEvent]) == 0 event = r.events[collector_asyncio.AsyncioLockAcquireEvent][0] - assert event.lock_name == "test_asyncio.py:15" + assert event.lock_name == "test_asyncio.py:15:lock" assert event.thread_id == _thread.get_ident() assert event.wait_time_ns >= 0 # It's called through pytest so I'm sure it's gonna be that long, right? @@ -39,7 +39,7 @@ async def test_asyncio_lock_release_events(): assert len(r.events[collector_asyncio.AsyncioLockAcquireEvent]) == 1 assert len(r.events[collector_asyncio.AsyncioLockReleaseEvent]) == 1 event = r.events[collector_asyncio.AsyncioLockReleaseEvent][0] - assert event.lock_name == "test_asyncio.py:35" + assert event.lock_name == "test_asyncio.py:35:lock" assert event.thread_id == _thread.get_ident() assert event.locked_for_ns >= 0 # It's called through pytest so I'm sure it's gonna be that long, right? 
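The renamed lock expectations in these test hunks encode the new naming scheme introduced in ddtrace/profiling/collector/_lock.py earlier in this diff: ``<init file>:<init line>:<variable name>``. A simplified sketch of the variable-name lookup follows; lock_var_name is a hypothetical helper for illustration only, while the real logic lives in _ProfiledLock._find_self_name and _maybe_update_self_name above:

import sys
import threading
import types


def lock_var_name(lock):
    # Scan the caller's locals, then globals, for a name bound to `lock`;
    # fall back to attributes of those values (e.g. self.foo_lock -> "foo_lock").
    frame = sys._getframe(1)
    for scope in (frame.f_locals, frame.f_globals):
        for name, value in scope.items():
            if name.startswith("__") or isinstance(value, types.ModuleType):
                continue
            if value is lock:
                return name
            for attribute in dir(value):
                if not attribute.startswith("__") and getattr(value, attribute, None) is lock:
                    return attribute
    return None


my_lock = threading.Lock()
assert lock_var_name(my_lock) == "my_lock"

Resolving the name once and caching it (as the real collector does) keeps the frame inspection off the hot acquire/release path after the first sample.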
@@ -61,7 +61,6 @@ async def test_lock_events_tracer(tracer): lock2 = asyncio.Lock() await lock2.acquire() lock.release() - trace_id = t.trace_id span_id = t.span_id lock2.release() @@ -70,16 +69,27 @@ async def test_lock_events_tracer(tracer): pass events = r.reset() # The tracer might use locks, so we need to look into every event to assert we got ours + lock1_name = "test_asyncio.py:58:lock" + lock2_name = "test_asyncio.py:61:lock2" + lines_with_trace = [61, 63] + lines_without_trace = [59, 65] for event_type in (collector_asyncio.AsyncioLockAcquireEvent, collector_asyncio.AsyncioLockReleaseEvent): - assert {"test_asyncio.py:58", "test_asyncio.py:61"}.issubset({e.lock_name for e in events[event_type]}) + if event_type == collector_asyncio.AsyncioLockAcquireEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) + elif event_type == collector_asyncio.AsyncioLockReleaseEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) for event in events[event_type]: - if event.name == "test_asyncio.py:58": - assert event.trace_id is None - assert event.span_id is None - assert event.trace_resource_container is None - assert event.trace_type is None - elif event.name == "test_asyncio.py:61": - assert event.trace_id == trace_id - assert event.span_id == span_id - assert event.trace_resource_container[0] == t.resource - assert event.trace_type == t.span_type + if event.name in [lock1_name, lock2_name]: + file_name, lineno, function_name, class_name = event.frames[0] + assert file_name == __file__.replace(".pyc", ".py") + assert lineno in lines_with_trace + lines_without_trace + assert function_name == "test_lock_events_tracer" + assert class_name == "" + if lineno in lines_without_trace: + assert event.span_id is None + assert event.trace_resource_container is None + assert event.trace_type is None + elif lineno in lines_with_trace: + assert event.span_id == span_id + assert event.trace_resource_container[0] == resource + assert event.trace_type == span_type diff --git a/tests/profiling/collector/test_threading.py b/tests/profiling/collector/test_threading.py index 9d6d43c2642..4165053fe26 100644 --- a/tests/profiling/collector/test_threading.py +++ b/tests/profiling/collector/test_threading.py @@ -3,6 +3,7 @@ import threading import uuid +import mock import pytest from six.moves import _thread @@ -69,13 +70,13 @@ def test_lock_acquire_events(): assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1 assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 0 event = r.events[collector_threading.ThreadingLockAcquireEvent][0] - assert event.lock_name == "test_threading.py:67" + assert event.lock_name == "test_threading.py:68:lock" assert event.thread_id == _thread.get_ident() assert event.wait_time_ns >= 0 # It's called through pytest so I'm sure it's gonna be that long, right? 
assert len(event.frames) > 3 assert event.nframes > 3 - assert event.frames[1] == (__file__.replace(".pyc", ".py"), 68, "test_lock_acquire_events", "") + assert event.frames[0] == (__file__.replace(".pyc", ".py"), 69, "test_lock_acquire_events", "") assert event.sampling_pct == 100 @@ -93,13 +94,13 @@ def lockfunc(self): assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1 assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 0 event = r.events[collector_threading.ThreadingLockAcquireEvent][0] - assert event.lock_name == "test_threading.py:88" + assert event.lock_name == "test_threading.py:89:lock" assert event.thread_id == _thread.get_ident() assert event.wait_time_ns >= 0 # It's called through pytest so I'm sure it's gonna be that long, right? assert len(event.frames) > 3 assert event.nframes > 3 - assert event.frames[1] == (__file__.replace(".pyc", ".py"), 89, "lockfunc", "Foobar") + assert event.frames[0] == (__file__.replace(".pyc", ".py"), 90, "lockfunc", "Foobar") assert event.sampling_pct == 100 @@ -114,24 +115,34 @@ def test_lock_events_tracer(tracer): lock2 = threading.Lock() lock2.acquire() lock.release() - trace_id = t.trace_id span_id = t.span_id lock2.release() events = r.reset() + lock1_name = "test_threading.py:112:lock" + lock2_name = "test_threading.py:115:lock2" + lines_with_trace = [116, 117] + lines_without_trace = [113, 119] # The tracer might use locks, so we need to look into every event to assert we got ours for event_type in (collector_threading.ThreadingLockAcquireEvent, collector_threading.ThreadingLockReleaseEvent): - assert {"test_threading.py:111", "test_threading.py:114"}.issubset({e.lock_name for e in events[event_type]}) + if event_type == collector_threading.ThreadingLockAcquireEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) + elif event_type == collector_threading.ThreadingLockReleaseEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) for event in events[event_type]: - if event.name == "test_threading.py:86": - assert event.trace_id is None - assert event.span_id is None - assert event.trace_resource_container is None - assert event.trace_type is None - elif event.name == "test_threading.py:89": - assert event.trace_id == trace_id - assert event.span_id == span_id - assert event.trace_resource_container[0] == t.resource - assert event.trace_type == t.span_type + if event.name in [lock1_name, lock2_name]: + file_name, lineno, function_name, class_name = event.frames[0] + assert file_name == __file__.replace(".pyc", ".py") + assert lineno in lines_with_trace + lines_without_trace + assert function_name == "test_lock_events_tracer" + assert class_name == "" + if lineno in lines_without_trace: + assert event.span_id is None + assert event.trace_resource_container is None + assert event.trace_type is None + elif lineno in lines_with_trace: + assert event.span_id == span_id + assert event.trace_resource_container[0] == resource + assert event.trace_type == span_type def test_lock_events_tracer_late_finish(tracer): @@ -145,26 +156,22 @@ def test_lock_events_tracer_late_finish(tracer): lock2 = threading.Lock() lock2.acquire() lock.release() - trace_id = span.trace_id - span_id = span.span_id lock2.release() span.resource = resource span.finish() events = r.reset() + lock1_name = "test_threading.py:153:lock" + lock2_name = "test_threading.py:156:lock2" # The tracer might use locks, so we need to look into every event to assert we got ours for 
event_type in (collector_threading.ThreadingLockAcquireEvent, collector_threading.ThreadingLockReleaseEvent): - assert {"test_threading.py:142", "test_threading.py:145"}.issubset({e.lock_name for e in events[event_type]}) + if event_type == collector_threading.ThreadingLockAcquireEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) + elif event_type == collector_threading.ThreadingLockReleaseEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) for event in events[event_type]: - if event.name == "test_threading.py:118": - assert event.trace_id is None - assert event.span_id is None - assert event.trace_resource_container is None - assert event.trace_type is None - elif event.name == "test_threading.py:121": - assert event.trace_id == trace_id - assert event.span_id == span_id - assert event.trace_resource_container[0] == span.resource - assert event.trace_type == span.span_type + assert event.span_id is None + assert event.trace_resource_container is None + assert event.trace_type is None def test_resource_not_collected(monkeypatch, tracer): @@ -179,24 +186,34 @@ def test_resource_not_collected(monkeypatch, tracer): lock2 = threading.Lock() lock2.acquire() lock.release() - trace_id = t.trace_id span_id = t.span_id lock2.release() events = r.reset() + lock1_name = "test_threading.py:183:lock" + lock2_name = "test_threading.py:186:lock2" + lines_with_trace = [187, 188] + lines_without_trace = [184, 190] # The tracer might use locks, so we need to look into every event to assert we got ours for event_type in (collector_threading.ThreadingLockAcquireEvent, collector_threading.ThreadingLockReleaseEvent): - assert {"test_threading.py:176", "test_threading.py:179"}.issubset({e.lock_name for e in events[event_type]}) + if event_type == collector_threading.ThreadingLockAcquireEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) + elif event_type == collector_threading.ThreadingLockReleaseEvent: + assert {lock1_name, lock2_name}.issubset({e.lock_name for e in events[event_type]}) for event in events[event_type]: - if event.name == "test_threading.py:151": - assert event.trace_id is None - assert event.span_id is None - assert event.trace_resource_container is None - assert event.trace_type is None - elif event.name == "test_threading.py:154": - assert event.trace_id == trace_id - assert event.span_id == span_id - assert event.trace_resource_container is None - assert event.trace_type == t.span_type + if event.name in [lock1_name, lock2_name]: + file_name, lineno, function_name, class_name = event.frames[0] + assert file_name == __file__.replace(".pyc", ".py") + assert lineno in lines_with_trace + lines_without_trace + assert function_name == "test_resource_not_collected" + assert class_name == "" + if lineno in lines_without_trace: + assert event.span_id is None + assert event.trace_resource_container is None + assert event.trace_type is None + elif lineno in lines_with_trace: + assert event.span_id == span_id + assert event.trace_resource_container[0] == resource + assert event.trace_type == span_type def test_lock_release_events(): @@ -208,13 +225,13 @@ def test_lock_release_events(): assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1 assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1 event = r.events[collector_threading.ThreadingLockReleaseEvent][0] - assert event.lock_name == "test_threading.py:205" + assert event.lock_name == 
"test_threading.py:222:lock" assert event.thread_id == _thread.get_ident() assert event.locked_for_ns >= 0 # It's called through pytest so I'm sure it's gonna be that long, right? assert len(event.frames) > 3 assert event.nframes > 3 - assert event.frames[1] == (__file__.replace(".pyc", ".py"), 207, "test_lock_release_events", "") + assert event.frames[0] == (__file__.replace(".pyc", ".py"), 224, "test_lock_release_events", "") assert event.sampling_pct == 100 @@ -248,16 +265,16 @@ def play_with_lock(): assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) >= 1 for event in r.events[collector_threading.ThreadingLockAcquireEvent]: - if event.lock_name == "test_threading.py:238": + if event.lock_name == "test_threading.py:255:lock": assert event.wait_time_ns >= 0 assert event.task_id == t.ident assert event.task_name == "foobar" # It's called through pytest so I'm sure it's gonna be that long, right? assert len(event.frames) > 3 assert event.nframes > 3 - assert event.frames[1] == ( + assert event.frames[0] == ( "tests/profiling/collector/test_threading.py", - 239, + 256, "play_with_lock", "", ), event.frames @@ -267,16 +284,16 @@ def play_with_lock(): pytest.fail("Lock event not found") for event in r.events[collector_threading.ThreadingLockReleaseEvent]: - if event.lock_name == "test_threading.py:238": + if event.lock_name == "test_threading.py:255:lock": assert event.locked_for_ns >= 0 assert event.task_id == t.ident assert event.task_name == "foobar" # It's called through pytest so I'm sure it's gonna be that long, right? assert len(event.frames) > 3 assert event.nframes > 3 - assert event.frames[1] == ( + assert event.frames[0] == ( "tests/profiling/collector/test_threading.py", - 240, + 257, "play_with_lock", "", ), event.frames @@ -368,41 +385,226 @@ def test_user_threads_have_native_id(): def test_lock_enter_exit_events(): r = recorder.Recorder() with collector_threading.ThreadingLockCollector(r, capture_pct=100): - lock = threading.Lock() - with lock: + th_lock = threading.Lock() + with th_lock: pass assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1 assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1 acquire_event = r.events[collector_threading.ThreadingLockAcquireEvent][0] - assert acquire_event.lock_name == "test_threading.py:371" + assert acquire_event.lock_name == "test_threading.py:388:th_lock" assert acquire_event.thread_id == _thread.get_ident() assert acquire_event.wait_time_ns >= 0 # We know that at least __enter__, this function, and pytest should be # in the stack. assert len(acquire_event.frames) >= 3 assert acquire_event.nframes >= 3 - # To implement 'with lock:', _lock._ProfiledLock implements __enter__ and - # __exit__. So frames[0] is __enter__ and __exit__ respectively. 
+    assert release_event.frames[0] == (
         __file__.replace(".pyc", ".py"),
         release_lineno,
         "test_lock_enter_exit_events",
         "",
     )
     assert release_event.sampling_pct == 100
+
+
+class Foo:
+    def __init__(self):
+        self.foo_lock = threading.Lock()
+
+    def foo(self):
+        with self.foo_lock:
+            pass
+
+
+class Bar:
+    def __init__(self):
+        self.foo = Foo()
+
+    def bar(self):
+        self.foo.foo()
+
+
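+# Note: locks held as instance attributes are named after the attribute that holds
+# them; resolving that name presumably relies on inspecting objects reachable from
+# the calling frame, gated by the profiling config flag lock.name_inspect_dir
+# (exercised by test_class_member_lock_no_inspect_dir below).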
+def test_class_member_lock():
+    r = recorder.Recorder()
+    with collector_threading.ThreadingLockCollector(r, capture_pct=100):
+        foobar = Foo()
+        foobar.foo()
+        bar = Bar()
+        bar.bar()
+
+    assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 2
+    assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 2
+
+    expected_lock_name = "test_threading.py:421:foo_lock"
+    for e in r.events[collector_threading.ThreadingLockAcquireEvent]:
+        assert e.lock_name == expected_lock_name
+        assert e.frames[0] == (__file__.replace(".pyc", ".py"), 424, "foo", "Foo")
+    for e in r.events[collector_threading.ThreadingLockReleaseEvent]:
+        assert e.lock_name == expected_lock_name
+        release_lineno = 424 if sys.version_info >= (3, 10) else 425
+        assert e.frames[0] == (__file__.replace(".pyc", ".py"), release_lineno, "foo", "Foo")
+
+
+def test_class_member_lock_no_inspect_dir():
+    with mock.patch("ddtrace.settings.profiling.config.lock.name_inspect_dir", False):
+        r = recorder.Recorder()
+        with collector_threading.ThreadingLockCollector(r, capture_pct=100):
+            bar = Bar()
+            bar.bar()
+        assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1
+        assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1
+        expected_lock_name = "test_threading.py:421"
+        acquire_event = r.events[collector_threading.ThreadingLockAcquireEvent][0]
+        assert acquire_event.lock_name == expected_lock_name
+        assert acquire_event.frames[0] == (__file__.replace(".pyc", ".py"), 424, "foo", "Foo")
+        release_event = r.events[collector_threading.ThreadingLockReleaseEvent][0]
+        assert release_event.lock_name == expected_lock_name
+        release_lineno = 424 if sys.version_info >= (3, 10) else 425
+        assert release_event.frames[0] == (__file__.replace(".pyc", ".py"), release_lineno, "foo", "Foo")
+
+
+def test_private_lock():
+    class Foo:
+        def __init__(self):
+            self.__lock = threading.Lock()
+
+        def foo(self):
+            with self.__lock:
+                pass
+
+    r = recorder.Recorder()
+    with collector_threading.ThreadingLockCollector(r, capture_pct=100):
+        foo = Foo()
+        foo.foo()
+
+    assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1
+    assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1
+    expected_lock_name = "test_threading.py:478:_Foo__lock"
+    acquire_event = r.events[collector_threading.ThreadingLockAcquireEvent][0]
+    assert acquire_event.lock_name == expected_lock_name
+    assert acquire_event.frames[0] == (__file__.replace(".pyc", ".py"), 481, "foo", "Foo")
+    release_event = r.events[collector_threading.ThreadingLockReleaseEvent][0]
+    assert release_event.lock_name == expected_lock_name
+    release_lineno = 481 if sys.version_info >= (3, 10) else 482
+    assert release_event.frames[0] == (__file__.replace(".pyc", ".py"), release_lineno, "foo", "Foo")
+
+
+def test_inner_lock():
+    class Bar:
+        def __init__(self):
+            self.foo = Foo()
+
+        def bar(self):
+            with self.foo.foo_lock:
+                pass
+
+    r = recorder.Recorder()
+    with collector_threading.ThreadingLockCollector(r, capture_pct=100):
+        bar = Bar()
+        bar.bar()
+
+    assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1
+    assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1
+    expected_lock_name = "test_threading.py:421"
+    acquire_event = r.events[collector_threading.ThreadingLockAcquireEvent][0]
+    assert acquire_event.lock_name == expected_lock_name
+    assert acquire_event.frames[0] == (__file__.replace(".pyc", ".py"), 507, "bar", "Bar")
+    release_event = r.events[collector_threading.ThreadingLockReleaseEvent][0]
+    assert release_event.lock_name == expected_lock_name
+    release_lineno = 507 if sys.version_info >= (3, 10) else 508
+    assert release_event.frames[0] == (__file__.replace(".pyc", ".py"), release_lineno, "bar", "Bar")
+
+
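+# An anonymous lock, never bound to a variable or attribute, keeps the bare
+# "<file basename>:<line>" name since there is no name to attach.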
release_lineno, "test_wrapt_c_ext_false", "") + + +@pytest.mark.skipif(os.getenv("WRAPT_DISABLE_EXTENSIONS"), reason="wrapt C extension is enabled") +def test_wrapt_c_ext_true(): + assert _lock.WRAPT_C_EXT is True + r = recorder.Recorder() + with collector_threading.ThreadingLockCollector(r, capture_pct=100): + th_lock = threading.Lock() + with th_lock: + pass + expected_lock_name = "test_threading.py:570:th_lock" + assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 1 + acquire_event = r.events[collector_threading.ThreadingLockAcquireEvent][0] + assert acquire_event.lock_name == expected_lock_name + assert acquire_event.frames[0] == (__file__.replace(".pyc", ".py"), 571, "test_wrapt_c_ext_true", "") + assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 1 + release_event = r.events[collector_threading.ThreadingLockReleaseEvent][0] + assert release_event.lock_name == expected_lock_name + release_lineno = 571 if sys.version_info >= (3, 10) else 572 + assert release_event.frames[0] == (__file__.replace(".pyc", ".py"), release_lineno, "test_wrapt_c_ext_true", "") + + +def test_global_locks(): + r = recorder.Recorder() + with collector_threading.ThreadingLockCollector(r, capture_pct=100): + from . import global_locks + + global_locks.foo() + global_locks.bar_instance.bar() + + assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 2 + assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 2 + expected_lock_names = ["global_locks.py:4:global_lock", "global_locks.py:15:bar_lock"] + expected_filename = __file__.replace(".pyc", ".py").replace("test_threading", "global_locks") + for e in r.events[collector_threading.ThreadingLockAcquireEvent]: + assert e.lock_name in expected_lock_names + if e.lock_name == expected_lock_names[0]: + assert e.frames[0] == (expected_filename, 9, "foo", "") + elif e.lock_name == expected_lock_names[1]: + assert e.frames[0] == (expected_filename, 18, "bar", "Bar") + for e in r.events[collector_threading.ThreadingLockReleaseEvent]: + assert e.lock_name in expected_lock_names + if e.lock_name == expected_lock_names[0]: + release_lineno = 9 if sys.version_info >= (3, 10) else 10 + assert e.frames[0] == (expected_filename, release_lineno, "foo", "") + elif e.lock_name == expected_lock_names[1]: + release_lineno = 18 if sys.version_info >= (3, 10) else 19 + assert e.frames[0] == (expected_filename, release_lineno, "bar", "Bar") diff --git a/tests/profiling/collector/test_threading_asyncio.py b/tests/profiling/collector/test_threading_asyncio.py index c2b8edc1973..a91c5d7156f 100644 --- a/tests/profiling/collector/test_threading_asyncio.py +++ b/tests/profiling/collector/test_threading_asyncio.py @@ -32,10 +32,10 @@ def asyncio_run(): lock_found = 0 for event in events[collector_threading.ThreadingLockAcquireEvent]: - if event.lock_name == "test_threading_asyncio.py:16": + if event.lock_name == "test_threading_asyncio.py:16:lock": assert event.task_name.startswith("Task-") lock_found += 1 - elif event.lock_name == "test_threading_asyncio.py:20": + elif event.lock_name == "test_threading_asyncio.py:20:lock": assert event.task_name is None assert event.thread_name == "foobar" lock_found += 1 diff --git a/tests/profiling/simple_program_fork.py b/tests/profiling/simple_program_fork.py index a0653fd0f19..5671e0904b0 100644 --- a/tests/profiling/simple_program_fork.py +++ b/tests/profiling/simple_program_fork.py @@ -12,7 +12,7 @@ lock = threading.Lock() lock.acquire() -test_lock_name = "simple_program_fork.py:13" 
+def test_global_locks():
+    r = recorder.Recorder()
+    with collector_threading.ThreadingLockCollector(r, capture_pct=100):
+        from . import global_locks
+
+        global_locks.foo()
+        global_locks.bar_instance.bar()
+
+    assert len(r.events[collector_threading.ThreadingLockAcquireEvent]) == 2
+    assert len(r.events[collector_threading.ThreadingLockReleaseEvent]) == 2
+    expected_lock_names = ["global_locks.py:4:global_lock", "global_locks.py:15:bar_lock"]
+    expected_filename = __file__.replace(".pyc", ".py").replace("test_threading", "global_locks")
+    for e in r.events[collector_threading.ThreadingLockAcquireEvent]:
+        assert e.lock_name in expected_lock_names
+        if e.lock_name == expected_lock_names[0]:
+            assert e.frames[0] == (expected_filename, 9, "foo", "")
+        elif e.lock_name == expected_lock_names[1]:
+            assert e.frames[0] == (expected_filename, 18, "bar", "Bar")
+    for e in r.events[collector_threading.ThreadingLockReleaseEvent]:
+        assert e.lock_name in expected_lock_names
+        if e.lock_name == expected_lock_names[0]:
+            release_lineno = 9 if sys.version_info >= (3, 10) else 10
+            assert e.frames[0] == (expected_filename, release_lineno, "foo", "")
+        elif e.lock_name == expected_lock_names[1]:
+            release_lineno = 18 if sys.version_info >= (3, 10) else 19
+            assert e.frames[0] == (expected_filename, release_lineno, "bar", "Bar")
diff --git a/tests/profiling/collector/test_threading_asyncio.py b/tests/profiling/collector/test_threading_asyncio.py
index c2b8edc1973..a91c5d7156f 100644
--- a/tests/profiling/collector/test_threading_asyncio.py
+++ b/tests/profiling/collector/test_threading_asyncio.py
@@ -32,10 +32,10 @@ def asyncio_run():
     lock_found = 0
     for event in events[collector_threading.ThreadingLockAcquireEvent]:
-        if event.lock_name == "test_threading_asyncio.py:16":
+        if event.lock_name == "test_threading_asyncio.py:16:lock":
             assert event.task_name.startswith("Task-")
             lock_found += 1
-        elif event.lock_name == "test_threading_asyncio.py:20":
+        elif event.lock_name == "test_threading_asyncio.py:20:lock":
             assert event.task_name is None
             assert event.thread_name == "foobar"
             lock_found += 1
diff --git a/tests/profiling/simple_program_fork.py b/tests/profiling/simple_program_fork.py
index a0653fd0f19..5671e0904b0 100644
--- a/tests/profiling/simple_program_fork.py
+++ b/tests/profiling/simple_program_fork.py
@@ -12,7 +12,7 @@
 lock = threading.Lock()
 lock.acquire()
-test_lock_name = "simple_program_fork.py:13"
+lock_lock_name = "simple_program_fork.py:13:lock"

 assert ddtrace.profiling.bootstrap.profiler.status == service.ServiceStatus.RUNNING

@@ -30,23 +30,23 @@
     lock.release()

     # We don't track it
-    assert test_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])

     # We track this one though
     lock = threading.Lock()
-    test_lock_name = "simple_program_fork.py:36"
-    assert test_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockAcquireEvent])
+    lock_lock_name = "simple_program_fork.py:36:lock"
+    assert lock_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockAcquireEvent])
     lock.acquire()
     events = recorder.reset()
-    assert test_lock_name in set(e.lock_name for e in events[cthreading.ThreadingLockAcquireEvent])
-    assert test_lock_name not in set(e.lock_name for e in events[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name in set(e.lock_name for e in events[cthreading.ThreadingLockAcquireEvent])
+    assert lock_lock_name not in set(e.lock_name for e in events[cthreading.ThreadingLockReleaseEvent])
     lock.release()
-    assert test_lock_name in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])

     parent_events = parent_recorder.reset()
     # Let's make sure our copy of the parent recorder does not receive it since the parent profiler has been stopped
-    assert test_lock_name not in set(e.lock_name for e in parent_events[cthreading.ThreadingLockAcquireEvent])
-    assert test_lock_name not in set(e.lock_name for e in parent_events[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name not in set(e.lock_name for e in parent_events[cthreading.ThreadingLockAcquireEvent])
+    assert lock_lock_name not in set(e.lock_name for e in parent_events[cthreading.ThreadingLockReleaseEvent])

     # This can run forever if anything is broken!
     while not recorder.events[stack_event.StackSampleEvent]:
@@ -54,9 +54,9 @@
 else:
     recorder = ddtrace.profiling.bootstrap.profiler._profiler._recorder
     assert recorder is parent_recorder
-    assert test_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name not in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
     lock.release()
-    assert test_lock_name in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
+    assert lock_lock_name in set(e.lock_name for e in recorder.reset()[cthreading.ThreadingLockReleaseEvent])
     assert ddtrace.profiling.bootstrap.profiler.status == service.ServiceStatus.RUNNING
     print(child_pid)
     pid, status = os.waitpid(child_pid, 0)
diff --git a/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_ddtrace_run].json b/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_ddtrace_run].json
index c05e181c129..132e962af9b 100644
--- a/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_ddtrace_run].json
+++ b/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_ddtrace_run].json
@@ -10,9 +10,9 @@
       "error": 0,
       "meta": {
         "_dd.p.dm": "-0",
-        "_dd.p.tid": "655535db00000000",
+        "_dd.p.tid": "66a6956800000000",
         "language": "python",
-        "runtime-id": "b4ffa244c11343de919ca20a7e8eebcf"
+        "runtime-id": "0da930484bcf4b488bfd7bb9cfd9c4b6"
       },
       "metrics": {
         "_dd.top_level": 1,
@@ -35,11 +35,12 @@
       "meta": {
         "_dd.base_service": "",
         "_dd.p.dm": "-0",
-        "_dd.p.tid": "655535db00000000",
+        "_dd.p.tid": "66a6956800000000",
+        "_dd.parent_id": "4f4e94b0f4d57229",
         "component": "flask",
         "flask.endpoint": "otel",
         "flask.url_rule": "/otel",
-        "flask.version": "2.1.3",
+        "flask.version": "1.1.4",
         "http.method": "GET",
         "http.route": "/otel",
         "http.status_code": "200",
@@ -56,10 +57,10 @@
         "_dd.top_level": 1,
         "_dd.tracer_kr": 1.0,
         "_sampling_priority_v1": 1,
-        "process_id": 7713
+        "process_id": 31660
       },
-      "duration": 14813125,
-      "start": 1700083163141821300
+      "duration": 4600000,
+      "start": 1722193256335520000
     },
     {
       "name": "flask.application",
@@ -76,8 +77,8 @@
         "flask.endpoint": "otel",
         "flask.url_rule": "/otel"
       },
-      "duration": 14243208,
-      "start": 1700083163142061300
+      "duration": 4219000,
+      "start": 1722193256335701000
     },
     {
       "name": "flask.try_trigger_before_first_request_functions",
@@ -92,8 +93,8 @@
         "_dd.base_service": "",
         "component": "flask"
       },
-      "duration": 14625,
-      "start": 1700083163142217383
+      "duration": 12000,
+      "start": 1722193256335827000
     },
     {
       "name": "flask.preprocess_request",
@@ -108,8 +109,8 @@
         "_dd.base_service": "",
         "component": "flask"
       },
-      "duration": 20125,
-      "start": 1700083163142331717
+      "duration": 13000,
+      "start": 1722193256335911000
     },
     {
       "name": "flask.dispatch_request",
@@ -124,8 +125,8 @@
         "_dd.base_service": "",
         "component": "flask"
       },
-      "duration": 13437750,
-      "start": 1700083163142415633
+      "duration": 3646000,
+      "start": 1722193256335970000
     },
     {
       "name": "tests.opentelemetry.flask_app.otel",
@@ -140,8 +141,8 @@
         "_dd.base_service": "",
         "component": "flask"
       },
-      "duration": 13354542,
-      "start": 1700083163142480508
+      "duration": 3585000,
+      "start": 1722193256336021000
     },
     {
       "name": "internal",
@@ -155,8 +156,8 @@
       "meta": {
         "_dd.base_service": ""
       },
-      "duration": 52875,
-      "start": 1700083163155755800
+      "duration": 36000,
+      "start": 1722193256339552000
     },
     {
       "name": "flask.process_response",
@@ -171,8 +172,8 @@
"_dd.base_service": "", "component": "flask" }, - "duration": 32375, - "start": 1700083163156004175 + "duration": 17000, + "start": 1722193256339703000 }, { "name": "flask.do_teardown_request", @@ -187,8 +188,8 @@ "_dd.base_service": "", "component": "flask" }, - "duration": 31625, - "start": 1700083163156192133 + "duration": 14000, + "start": 1722193256339831000 }, { "name": "flask.do_teardown_appcontext", @@ -203,8 +204,8 @@ "_dd.base_service": "", "component": "flask" }, - "duration": 9292, - "start": 1700083163156278258 + "duration": 10000, + "start": 1722193256339897000 }, { "name": "flask.response", @@ -219,6 +220,6 @@ "_dd.base_service": "", "component": "flask" }, - "duration": 311541, - "start": 1700083163156314342 + "duration": 183000, + "start": 1722193256339931000 }]] diff --git a/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_opentelemetry_instrument].json b/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_opentelemetry_instrument].json index e1fbd2196d3..daf39a83b8a 100644 --- a/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_opentelemetry_instrument].json +++ b/tests/snapshots/tests.opentelemetry.test_trace.test_distributed_trace_with_flask_app[with_opentelemetry_instrument].json @@ -10,23 +10,23 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "655535dc00000000", + "_dd.p.tid": "66a666b600000000", "language": "python", - "runtime-id": "b4ffa244c11343de919ca20a7e8eebcf" + "runtime-id": "4fe738d296c04c00aff7eb2a66389ad4" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 7703 + "process_id": 75526 }, - "duration": 24467250, - "start": 1700083164080082009 + "duration": 3561000, + "start": 1722181302828129000 }, { "name": "server", "service": "", - "resource": "/otel", + "resource": "GET /otel", "trace_id": 0, "span_id": 2, "parent_id": 1, @@ -34,7 +34,8 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "655535dc00000000", + "_dd.p.tid": "66a666b600000000", + "_dd.parent_id": "ae6e2bf21738f62f", "http.flavor": "1.1", "http.host": "0.0.0.0:8001", "http.method": "GET", @@ -45,21 +46,22 @@ "http.target": "/otel", "http.user_agent": "python-requests/2.28.1", "language": "python", + "net.host.name": "0.0.0.0:8001", "net.peer.ip": "127.0.0.1", - "runtime-id": "f90fe90fc53a4388b02210639d156981", + "runtime-id": "0dacca250fe4471094dc593f4892b91f", "span.kind": "server", - "tracestate": "dd=s:1;t.dm:-0" + "tracestate": "dd=p:ae6e2bf21738f62f;s:1;t.dm:-0" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, "net.host.port": 8001, - "net.peer.port": 65508, - "process_id": 7721 + "net.peer.port": 62683, + "process_id": 75530 }, - "duration": 413250, - "start": 1700083164081574592 + "duration": 515000, + "start": 1722181302830460000 }, { "name": "internal", @@ -70,6 +72,6 @@ "parent_id": 2, "type": "", "error": 0, - "duration": 18250, - "start": 1700083164081867259 + "duration": 16000, + "start": 1722181302830847000 }]] diff --git a/tests/telemetry/test_writer.py b/tests/telemetry/test_writer.py index d6713694a2b..49faf39ab1b 100644 --- a/tests/telemetry/test_writer.py +++ b/tests/telemetry/test_writer.py @@ -648,9 +648,7 @@ def test_telemetry_writer_agentless_setup_eu(): assert new_telemetry_writer._client._is_agentless is True assert new_telemetry_writer._client._is_disabled is False assert new_telemetry_writer._client._endpoint == "api/v2/apmtelemetry" - assert ( - 
     assert new_telemetry_writer._client._headers["dd-api-key"] == "foobarkey"