import asyncio
import copy
import inspect
import re
import time
import warnings
from typing import (
    TYPE_CHECKING,
    Any,
    AsyncIterator,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Mapping,
    MutableMapping,
    Optional,
    Protocol,
    Set,
    Tuple,
    Type,
    TypedDict,
    TypeVar,
    Union,
    cast,
)

from redis._parsers.helpers import (
    _RedisCallbacks,
    _RedisCallbacksRESP2,
    _RedisCallbacksRESP3,
    bool_ok,
)
from redis.asyncio.connection import (
    Connection,
    ConnectionPool,
    SSLConnection,
    UnixDomainSocketConnection,
)
from redis.asyncio.lock import Lock
from redis.asyncio.observability.recorder import (
    record_error_count,
    record_operation_duration,
    record_pubsub_message,
)
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialWithJitterBackoff
from redis.client import (
    EMPTY_RESPONSE,
    NEVER_DECODE,
    AbstractRedis,
    CaseInsensitiveDict,
)
from redis.commands import (
    AsyncCoreCommands,
    AsyncRedisModuleCommands,
    AsyncSentinelCommands,
    list_or_args,
)
from redis.credentials import CredentialProvider
from redis.driver_info import DriverInfo, resolve_driver_info
from redis.event import (
    AfterPooledConnectionsInstantiationEvent,
    AfterPubSubConnectionInstantiationEvent,
    AfterSingleConnectionInstantiationEvent,
    ClientType,
    EventDispatcher,
)
from redis.exceptions import (
    ConnectionError,
    ExecAbortError,
    PubSubError,
    RedisError,
    ResponseError,
    WatchError,
)
from redis.observability.attributes import PubSubDirection
from redis.typing import ChannelT, EncodableT, KeyT
from redis.utils import (
    SSL_AVAILABLE,
    _set_info_logger,
    deprecated_args,
    deprecated_function,
    safe_str,
    str_if_bytes,
    truncate_text,
)

if TYPE_CHECKING and SSL_AVAILABLE:
    from ssl import TLSVersion, VerifyFlags, VerifyMode
else:
    TLSVersion = None
    VerifyMode = None
    VerifyFlags = None

PubSubHandler = Callable[[Dict[str, str]], Awaitable[None]]
_KeyT = TypeVar("_KeyT", bound=KeyT)
_ArgT = TypeVar("_ArgT", KeyT, EncodableT)
_RedisT = TypeVar("_RedisT", bound="Redis")
_NormalizeKeysT = TypeVar("_NormalizeKeysT", bound=Mapping[ChannelT, object])

if TYPE_CHECKING:
    from redis.commands.core import Script


class ResponseCallbackProtocol(Protocol):
    def __call__(self, response: Any, **kwargs): ...


class AsyncResponseCallbackProtocol(Protocol):
    async def __call__(self, response: Any, **kwargs): ...


ResponseCallbackT = Union[ResponseCallbackProtocol, AsyncResponseCallbackProtocol]


class Redis(
    AbstractRedis, AsyncRedisModuleCommands, AsyncCoreCommands, AsyncSentinelCommands
):
    """
    Implementation of the Redis protocol.

    This abstract class provides a Python interface to all Redis commands
    and an implementation of the Redis protocol.

    Pipelines derive from this, implementing how the commands are sent and
    received to the Redis server. Based on configuration, an instance will
    either use a ConnectionPool, or a Connection object to talk to redis.
    """

    response_callbacks: MutableMapping[Union[str, bytes], ResponseCallbackT]

    @classmethod
    def from_url(
        cls: Type["Redis"],
        url: str,
        single_connection_client: bool = False,
        auto_close_connection_pool: Optional[bool] = None,
        **kwargs,
    ) -> "Redis":
        """
        Return a Redis client object configured from the given URL.

        For example::

            redis://[[username]:[password]]@localhost:6379/0
            rediss://[[username]:[password]]@localhost:6379/0
            unix://[username@]/path/to/socket.sock?db=0[&password=password]

        Three URL schemes are supported:

        - ``redis://`` creates a TCP socket connection. See more at:
          <https://www.iana.org/assignments/uri-schemes/prov/redis>
        - ``rediss://`` creates an SSL-wrapped TCP socket connection. See more at:
          <https://www.iana.org/assignments/uri-schemes/prov/rediss>
        - ``unix://`` creates a Unix Domain Socket connection.

        The username, password, hostname, path and all querystring values
        are passed through urllib.parse.unquote in order to replace any
        percent-encoded values with their corresponding characters.

        There are several ways to specify a database number. The first value
        found will be used:

        1. A ``db`` querystring option, e.g. redis://localhost?db=0
        2. If using the redis:// or rediss:// schemes, the path argument
           of the url, e.g. redis://localhost/0
        3. A ``db`` keyword argument to this function.

        If none of these options are specified, the default db=0 is used.

        All querystring options are cast to their appropriate Python types.
        Boolean arguments can be specified with string values "True"/"False"
        or "Yes"/"No". Values that cannot be properly cast cause a
        ``ValueError`` to be raised. Once parsed, the querystring arguments
        and keyword arguments are passed to the ``ConnectionPool``'s
        class initializer. In the case of conflicting arguments, querystring
        arguments always win.
        """
        connection_pool = ConnectionPool.from_url(url, **kwargs)
        client = cls(
            connection_pool=connection_pool,
            single_connection_client=single_connection_client,
        )
        if auto_close_connection_pool is not None:
            warnings.warn(
                DeprecationWarning(
                    '"auto_close_connection_pool" is deprecated '
                    "since version 5.0.1. "
                    "Please create a ConnectionPool explicitly and "
                    "provide to the Redis() constructor instead."
                )
            )
        else:
            auto_close_connection_pool = True
        client.auto_close_connection_pool = auto_close_connection_pool
        return client
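    # Usage sketch (illustrative only, not part of the library): connect via a
    # URL, run a command, and close the client. Assumes a Redis server
    # reachable at localhost:6379 and that the calls run inside an event loop.
    #
    #     client = Redis.from_url("redis://localhost:6379/0", decode_responses=True)
    #     await client.set("greeting", "hello")
    #     value = await client.get("greeting")  # -> "hello"
    #     await client.aclose()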
    @classmethod
    def from_pool(
        cls: Type["Redis"],
        connection_pool: ConnectionPool,
    ) -> "Redis":
        """
        Return a Redis client from the given connection pool.

        The Redis client will take ownership of the connection pool and
        close it when the Redis client is closed.
        """
        client = cls(
            connection_pool=connection_pool,
        )
        client.auto_close_connection_pool = True
        return client

    @deprecated_args(
        args_to_warn=["retry_on_timeout"],
        reason="TimeoutError is included by default.",
        version="6.0.0",
    )
    @deprecated_args(
        args_to_warn=["lib_name", "lib_version"],
        reason="Use 'driver_info' parameter instead. "
        "lib_name and lib_version will be removed in a future version.",
    )
    def __init__(
        self,
        *,
        host: str = "localhost",
        port: int = 6379,
        db: Union[str, int] = 0,
        password: Optional[str] = None,
        socket_timeout: Optional[float] = None,
        socket_connect_timeout: Optional[float] = None,
        socket_keepalive: Optional[bool] = None,
        socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
        connection_pool: Optional[ConnectionPool] = None,
        unix_socket_path: Optional[str] = None,
        encoding: str = "utf-8",
        encoding_errors: str = "strict",
        decode_responses: bool = False,
        retry_on_timeout: bool = False,
        retry: Retry = Retry(
            backoff=ExponentialWithJitterBackoff(base=1, cap=10), retries=3
        ),
        retry_on_error: Optional[list] = None,
        ssl: bool = False,
        ssl_keyfile: Optional[str] = None,
        ssl_certfile: Optional[str] = None,
        ssl_cert_reqs: Union[str, VerifyMode] = "required",
        ssl_include_verify_flags: Optional[List[VerifyFlags]] = None,
        ssl_exclude_verify_flags: Optional[List[VerifyFlags]] = None,
        ssl_ca_certs: Optional[str] = None,
        ssl_ca_data: Optional[str] = None,
        ssl_ca_path: Optional[str] = None,
        ssl_check_hostname: bool = True,
        ssl_min_version: Optional[TLSVersion] = None,
        ssl_ciphers: Optional[str] = None,
        ssl_password: Optional[str] = None,
        max_connections: Optional[int] = None,
        single_connection_client: bool = False,
        health_check_interval: int = 0,
        client_name: Optional[str] = None,
        lib_name: Optional[str] = None,
        lib_version: Optional[str] = None,
        driver_info: Optional["DriverInfo"] = None,
        username: Optional[str] = None,
        auto_close_connection_pool: Optional[bool] = None,
        redis_connect_func=None,
        credential_provider: Optional[CredentialProvider] = None,
        protocol: Optional[int] = 2,
        event_dispatcher: Optional[EventDispatcher] = None,
    ):
        """
        Initialize a new Redis client.

        To specify a retry policy for specific errors, you have two options:

        1. Set ``retry_on_error`` to a list of the error(s) to retry on, and
           optionally set ``retry`` to a valid ``Retry`` object (in case the
           default one is not appropriate). With this approach, retries are
           triggered on the default errors specified in the Retry object,
           enriched with the errors specified in ``retry_on_error``.
        2. Define a ``Retry`` object with configured ``supported_errors`` and
           pass it as the ``retry`` parameter. With this approach you
           completely redefine the errors on which retries will happen.

        ``retry_on_timeout`` is deprecated - please include the TimeoutError
        either in the Retry object or in the ``retry_on_error`` list.

        When ``connection_pool`` is provided, the retry configuration of the
        provided pool will be used.
        """
        kwargs: Dict[str, Any]
        if event_dispatcher is None:
            self._event_dispatcher = EventDispatcher()
        else:
            self._event_dispatcher = event_dispatcher
        # auto_close_connection_pool only has an effect if connection_pool is
        # None. It is assumed that if connection_pool is not None, the user
        # wants to manage the connection pool themselves.
        if auto_close_connection_pool is not None:
            warnings.warn(
                DeprecationWarning(
                    '"auto_close_connection_pool" is deprecated '
                    "since version 5.0.1. "
                    "Please create a ConnectionPool explicitly and "
                    "provide to the Redis() constructor instead."
                )
            )
        else:
            auto_close_connection_pool = True
        if not connection_pool:
            # Create internal connection pool, expected to be closed by Redis instance
            if not retry_on_error:
                retry_on_error = []
            # Handle driver_info: if provided, use it; otherwise create it
            # from lib_name/lib_version
            computed_driver_info = resolve_driver_info(
                driver_info, lib_name, lib_version
            )
            kwargs = {
                "db": db,
                "username": username,
                "password": password,
                "credential_provider": credential_provider,
                "socket_timeout": socket_timeout,
                "encoding": encoding,
                "encoding_errors": encoding_errors,
                "decode_responses": decode_responses,
                "retry_on_error": retry_on_error,
                "retry": copy.deepcopy(retry),
                "max_connections": max_connections,
                "health_check_interval": health_check_interval,
                "client_name": client_name,
                "driver_info": computed_driver_info,
                "redis_connect_func": redis_connect_func,
                "protocol": protocol,
            }
            # based on input, set up appropriate connection args
            if unix_socket_path is not None:
                kwargs.update(
                    {
                        "path": unix_socket_path,
                        "connection_class": UnixDomainSocketConnection,
                    }
                )
            else:
                # TCP specific options
                kwargs.update(
                    {
                        "host": host,
                        "port": port,
                        "socket_connect_timeout": socket_connect_timeout,
                        "socket_keepalive": socket_keepalive,
                        "socket_keepalive_options": socket_keepalive_options,
                    }
                )
                if ssl:
                    kwargs.update(
                        {
                            "connection_class": SSLConnection,
                            "ssl_keyfile": ssl_keyfile,
                            "ssl_certfile": ssl_certfile,
                            "ssl_cert_reqs": ssl_cert_reqs,
                            "ssl_include_verify_flags": ssl_include_verify_flags,
                            "ssl_exclude_verify_flags": ssl_exclude_verify_flags,
                            "ssl_ca_certs": ssl_ca_certs,
                            "ssl_ca_data": ssl_ca_data,
                            "ssl_ca_path": ssl_ca_path,
                            "ssl_check_hostname": ssl_check_hostname,
                            "ssl_min_version": ssl_min_version,
                            "ssl_ciphers": ssl_ciphers,
                            "ssl_password": ssl_password,
                        }
                    )
            # This arg is only used if no pool is passed in
            self.auto_close_connection_pool = auto_close_connection_pool
            connection_pool = ConnectionPool(**kwargs)
            self._event_dispatcher.dispatch(
                AfterPooledConnectionsInstantiationEvent(
                    [connection_pool], ClientType.ASYNC, credential_provider
                )
            )
        else:
            # If a pool is passed in, do not close it
            self.auto_close_connection_pool = False
            self._event_dispatcher.dispatch(
                AfterPooledConnectionsInstantiationEvent(
                    [connection_pool], ClientType.ASYNC, credential_provider
                )
            )
        self.connection_pool = connection_pool
        self.single_connection_client = single_connection_client
        self.connection: Optional[Connection] = None
        self.response_callbacks = CaseInsensitiveDict(_RedisCallbacks)
        if self.connection_pool.connection_kwargs.get("protocol") in ["3", 3]:
            self.response_callbacks.update(_RedisCallbacksRESP3)
        else:
            self.response_callbacks.update(_RedisCallbacksRESP2)
        # If using a single connection client, we need to lock creation-of and use-of
        # the client in order to avoid race conditions such as using asyncio.gather
        # on a set of redis commands
        self._single_conn_lock = asyncio.Lock()
        # When used as an async context manager, we need to increment and decrement
        # a usage counter so that we can close the connection pool when no one is
        # using the client.
        self._usage_counter = 0
        self._usage_lock = asyncio.Lock()

    def __repr__(self):
        return (
            f"<{self.__class__.__module__}.{self.__class__.__name__}"
            f"({self.connection_pool!r})>"
        )

    def __await__(self):
        return self.initialize().__await__()

    async def initialize(self: _RedisT) -> _RedisT:
        if self.single_connection_client:
            async with self._single_conn_lock:
                if self.connection is None:
                    self.connection = await self.connection_pool.get_connection()
                    self._event_dispatcher.dispatch(
                        AfterSingleConnectionInstantiationEvent(
                            self.connection, ClientType.ASYNC, self._single_conn_lock
                        )
                    )
        return self

    def set_response_callback(self, command: str, callback: ResponseCallbackT):
        """Set a custom Response Callback"""
        self.response_callbacks[command] = callback

    def get_encoder(self):
        """Get the connection pool's encoder"""
        return self.connection_pool.get_encoder()

    def get_connection_kwargs(self):
        """Get the connection's keyword arguments"""
        return self.connection_pool.connection_kwargs

    def get_retry(self) -> Optional[Retry]:
        return self.get_connection_kwargs().get("retry")

    def set_retry(self, retry: Retry) -> None:
        self.get_connection_kwargs().update({"retry": retry})
        self.connection_pool.set_retry(retry)

    def load_external_module(self, funcname, func):
        """
        This function can be used to add externally defined redis modules,
        and their namespaces, to the redis client.

        funcname - A string containing the name of the function to create
        func - The function being added to this class.

        ex: Assume that one has a custom redis module named foomod that
        creates commands named 'foo.dothing' and 'foo.anotherthing' in redis.
        To load these functions into this namespace::

            from redis import Redis
            from foomodule import F
            r = Redis()
            r.load_external_module("foo", F)
            r.foo().dothing('your', 'arguments')

        For a concrete example see the reimport of the redisjson module in
        tests/test_connection.py::test_loading_external_modules
        """
        setattr(self, funcname, func)

    def pipeline(
        self, transaction: bool = True, shard_hint: Optional[str] = None
    ) -> "Pipeline":
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return Pipeline(
            self.connection_pool, self.response_callbacks, transaction, shard_hint
        )
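    # Usage sketch (illustrative only, not part of the library): queue several
    # commands and send them in one round trip. ``r`` is assumed to be an
    # initialized ``Redis`` instance.
    #
    #     async with r.pipeline(transaction=True) as pipe:
    #         pipe.set("counter", 1)
    #         pipe.incr("counter")
    #         set_ok, new_value = await pipe.execute()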
    async def transaction(
        self,
        func: Callable[["Pipeline"], Union[Any, Awaitable[Any]]],
        *watches: KeyT,
        shard_hint: Optional[str] = None,
        value_from_callable: bool = False,
        watch_delay: Optional[float] = None,
    ):
        """
        Convenience method for executing the callable ``func`` as a
        transaction while watching all keys specified in ``watches``. The
        ``func`` callable should expect a single argument which is a Pipeline
        object.
        """
        pipe: Pipeline
        async with self.pipeline(True, shard_hint) as pipe:
            while True:
                try:
                    if watches:
                        await pipe.watch(*watches)
                    func_value = func(pipe)
                    if inspect.isawaitable(func_value):
                        func_value = await func_value
                    exec_value = await pipe.execute()
                    return func_value if value_from_callable else exec_value
                except WatchError:
                    if watch_delay is not None and watch_delay > 0:
                        await asyncio.sleep(watch_delay)
                    continue
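    # Usage sketch (illustrative only, not part of the library): atomically
    # increment a watched key. ``r`` is assumed to be an initialized ``Redis``
    # instance; the callable is retried whenever the watched key changes
    # between the read and the EXEC.
    #
    #     async def add_one(pipe):
    #         current = await pipe.get("balance")
    #         pipe.multi()
    #         pipe.set("balance", int(current or 0) + 1)
    #
    #     await r.transaction(add_one, "balance")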
    def lock(
        self,
        name: KeyT,
        timeout: Optional[float] = None,
        sleep: float = 0.1,
        blocking: bool = True,
        blocking_timeout: Optional[float] = None,
        lock_class: Optional[Type[Lock]] = None,
        thread_local: bool = True,
        raise_on_release_error: bool = True,
    ) -> Lock:
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.

        ``blocking`` indicates whether calling ``acquire`` should block until
        the lock has been acquired or fail immediately, causing ``acquire``
        to return False and the lock not being acquired. Defaults to True.
        Note this value can be overridden by passing a ``blocking``
        argument to ``acquire``.

        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.

        ``lock_class`` forces the specified lock implementation. Note that as
        of redis-py 3.0, the only lock class we implement is ``Lock`` (which is
        a Lua-based lock). So, it's unlikely you'll need this parameter, unless
        you have created your own custom lock class.

        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:

            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
                     thread-1 sets the token to "abc"
            time: 1, thread-2 blocks trying to acquire `my-lock` using the
                     Lock instance.
            time: 5, thread-1 has not yet completed. redis expires the lock
                     key.
            time: 5, thread-2 acquires `my-lock` now that it's available.
                     thread-2 sets the token to "xyz"
            time: 6, thread-1 finishes its work and calls release(). If the
                     token is *not* stored in thread local storage, then
                     thread-1 would see the token value as "xyz" and would be
                     able to successfully release thread-2's lock.

        ``raise_on_release_error`` indicates whether to raise an exception when
        the lock is no longer owned when exiting the context manager. By default,
        this is True, meaning an exception will be raised. If False, the warning
        will be logged and the exception will be suppressed.

        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage.
        """
        if lock_class is None:
            lock_class = Lock
        return lock_class(
            self,
            name,
            timeout=timeout,
            sleep=sleep,
            blocking=blocking,
            blocking_timeout=blocking_timeout,
            thread_local=thread_local,
            raise_on_release_error=raise_on_release_error,
        )
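    # Usage sketch (illustrative only, not part of the library): hold a
    # distributed lock while doing work. ``r`` is assumed to be an initialized
    # ``Redis`` instance; the lock auto-expires after ``timeout`` seconds if
    # it is never released.
    #
    #     async with r.lock("my-resource", timeout=5, blocking_timeout=2):
    #         ...  # critical section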
    def pubsub(self, **kwargs) -> "PubSub":
        """
        Return a Publish/Subscribe object. With this object, you can
        subscribe to channels and listen for messages that get published to
        them.
        """
        return PubSub(
            self.connection_pool, event_dispatcher=self._event_dispatcher, **kwargs
        )

    def monitor(self) -> "Monitor":
        return Monitor(self.connection_pool)

    def client(self) -> "Redis":
        return self.__class__(
            connection_pool=self.connection_pool, single_connection_client=True
        )

    async def __aenter__(self: _RedisT) -> _RedisT:
        """
        Async context manager entry. Increments a usage counter so that the
        connection pool is only closed (via aclose()) when no context is using
        the client.
        """
        await self._increment_usage()
        try:
            # Initialize the client (i.e. establish connection, etc.)
            return await self.initialize()
        except Exception:
            # If initialization fails, decrement the counter to keep it in sync
            await self._decrement_usage()
            raise

    async def _increment_usage(self) -> int:
        """
        Helper coroutine to increment the usage counter while holding the lock.
        Returns the new value of the usage counter.
        """
        async with self._usage_lock:
            self._usage_counter += 1
            return self._usage_counter

    async def _decrement_usage(self) -> int:
        """
        Helper coroutine to decrement the usage counter while holding the lock.
        Returns the new value of the usage counter.
        """
        async with self._usage_lock:
            self._usage_counter -= 1
            return self._usage_counter

    async def __aexit__(self, exc_type, exc_value, traceback):
        """
        Async context manager exit. Decrements a usage counter. If this is the
        last exit (counter becomes zero), the client closes its connection pool.
        """
        current_usage = await asyncio.shield(self._decrement_usage())
        if current_usage == 0:
            # This was the last active context, so disconnect the pool.
            await asyncio.shield(self.aclose())

    _DEL_MESSAGE = "Unclosed Redis client"

    # passing _warn and _grl as argument defaults since they may be gone
    # by the time __del__ is called at shutdown
    def __del__(
        self,
        _warn: Any = warnings.warn,
        _grl: Any = asyncio.get_running_loop,
    ) -> None:
        if hasattr(self, "connection") and (self.connection is not None):
            _warn(f"Unclosed client session {self!r}", ResourceWarning, source=self)
            try:
                context = {"client": self, "message": self._DEL_MESSAGE}
                _grl().call_exception_handler(context)
            except RuntimeError:
                pass
            self.connection._close()

    async def aclose(self, close_connection_pool: Optional[bool] = None) -> None:
        """
        Closes the Redis client connection.

        Args:
            close_connection_pool:
                decides whether to close the connection pool used by this
                Redis client, overriding Redis.auto_close_connection_pool.
                By default, let Redis.auto_close_connection_pool decide
                whether to close the connection pool.
        """
        conn = self.connection
        if conn:
            self.connection = None
            await self.connection_pool.release(conn)
        if close_connection_pool or (
            close_connection_pool is None and self.auto_close_connection_pool
        ):
            await self.connection_pool.disconnect()

    @deprecated_function(version="5.0.1", reason="Use aclose() instead", name="close")
    async def close(self, close_connection_pool: Optional[bool] = None) -> None:
        """
        Alias for aclose(), for backwards compatibility
        """
        await self.aclose(close_connection_pool)

    async def _send_command_parse_response(self, conn, command_name, *args, **options):
        """
        Send a command and parse the response
        """
        await conn.send_command(*args)
        return await self.parse_response(conn, command_name, **options)

    async def _close_connection(
        self,
        conn: Connection,
        error: Optional[BaseException] = None,
        failure_count: Optional[int] = None,
        start_time: Optional[float] = None,
        command_name: Optional[str] = None,
    ):
        """
        Close the connection before retrying.

        The supported exceptions are already checked in the
        retry object so we don't need to do it here.

        After we disconnect the connection, it will try to reconnect and
        do a health check as part of the send_command logic (on the
        connection level).
        """
        if (
            error
            and failure_count is not None
            and failure_count <= conn.retry.get_retries()
        ):
            await record_operation_duration(
                command_name=command_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
                error=error,
                retry_attempts=failure_count,
            )
        await conn.disconnect(error=error, failure_count=failure_count)

    # COMMAND EXECUTION AND PROTOCOL PARSING
    async def execute_command(self, *args, **options):
        """Execute a command and return a parsed response"""
        await self.initialize()
        pool = self.connection_pool
        command_name = args[0]
        conn = self.connection or await pool.get_connection()

        # Start timing for observability
        start_time = time.monotonic()
        # Track actual retry attempts for error reporting
        actual_retry_attempts = 0

        def failure_callback(error, failure_count):
            nonlocal actual_retry_attempts
            actual_retry_attempts = failure_count
            return self._close_connection(
                conn, error, failure_count, start_time, command_name
            )

        if self.single_connection_client:
            await self._single_conn_lock.acquire()
        try:
            result = await conn.retry.call_with_retry(
                lambda: self._send_command_parse_response(
                    conn, command_name, *args, **options
                ),
                failure_callback,
                with_failure_count=True,
            )
            await record_operation_duration(
                command_name=command_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
            )
            return result
        except Exception as e:
            await record_error_count(
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                network_peer_address=getattr(conn, "host", None),
                network_peer_port=getattr(conn, "port", None),
                error_type=e,
                retry_attempts=actual_retry_attempts,
                is_internal=False,
            )
            raise
        finally:
            if self.single_connection_client:
                self._single_conn_lock.release()
            if not self.connection:
                await pool.release(conn)

    async def parse_response(
        self, connection: Connection, command_name: Union[str, bytes], **options
    ):
        """Parses a response from the Redis server"""
        try:
            if NEVER_DECODE in options:
                response = await connection.read_response(disable_decoding=True)
                options.pop(NEVER_DECODE)
            else:
                response = await connection.read_response()
        except ResponseError:
            if EMPTY_RESPONSE in options:
                return options[EMPTY_RESPONSE]
            raise

        if EMPTY_RESPONSE in options:
            options.pop(EMPTY_RESPONSE)

        # Remove the "keys" entry; it is only needed for the cache.
        options.pop("keys", None)

        if command_name in self.response_callbacks:
            # Mypy bug: https://github.com/python/mypy/issues/10977
            command_name = cast(str, command_name)
            retval = self.response_callbacks[command_name](response, **options)
            return await retval if inspect.isawaitable(retval) else retval
        return response


StrictRedis = Redis


class MonitorCommandInfo(TypedDict):
    time: float
    db: int
    client_address: str
    client_port: str
    client_type: str
    command: str


class Monitor:
    """
    Monitor is useful for handling the MONITOR command to the redis server.

    next_command() returns one command from monitor;
    listen() yields commands from monitor.
    """

    monitor_re = re.compile(r"\[(\d+) (.*?)\] (.*)")
    command_re = re.compile(r'"(.*?)(?<!\\)"')

    def __init__(self, connection_pool: ConnectionPool):
        self.connection_pool = connection_pool
        self.connection: Optional[Connection] = None

    async def connect(self):
        if self.connection is None:
            self.connection = await self.connection_pool.get_connection()

    async def __aenter__(self):
        await self.connect()
        await self.connection.send_command("MONITOR")
        # check that monitor returns 'OK', but don't return it to the user
        response = await self.connection.read_response()
        if not bool_ok(response):
            raise RedisError(f"MONITOR failed: {response}")
        return self

    async def __aexit__(self, *args):
        await self.connection.disconnect()
        await self.connection_pool.release(self.connection)

    async def next_command(self) -> MonitorCommandInfo:
        """Parse the response from a monitor command"""
        await self.connect()
        response = await self.connection.read_response()
        if isinstance(response, bytes):
            response = self.connection.encoder.decode(response, force=True)
        command_time, command_data = response.split(" ", 1)
        m = self.monitor_re.match(command_data)
        db_id, client_info, command = m.groups()
        command = " ".join(self.command_re.findall(command))
        # Redis escapes double quotes because each piece of the command
        # string is surrounded by double quotes. We don't have that
        # requirement so remove the escaping and leave the quote.
        command = command.replace('\\"', '"')
        if client_info == "lua":
            client_address = "lua"
            client_port = ""
            client_type = "lua"
        elif client_info.startswith("unix"):
            client_address = "unix"
            client_port = client_info[5:]
            client_type = "unix"
        else:
            # use rsplit as ipv6 addresses contain colons
            client_address, client_port = client_info.rsplit(":", 1)
            client_type = "tcp"
        return {
            "time": float(command_time),
            "db": int(db_id),
            "client_address": client_address,
            "client_port": client_port,
            "client_type": client_type,
            "command": command,
        }

    async def listen(self) -> AsyncIterator[MonitorCommandInfo]:
        """Listen for commands coming to the server."""
        while True:
            yield await self.next_command()
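# Usage sketch (illustrative only, not part of the library): stream the
# commands the server is processing. ``r`` is assumed to be an initialized
# ``Redis`` instance; note that MONITOR adds measurable load on the server.
#
#     async with r.monitor() as m:
#         async for info in m.listen():
#             print(info["time"], info["command"])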
class PubSub:
    """
    PubSub provides publish, subscribe and listen support to Redis channels.

    After subscribing to one or more channels, the listen() method will block
    until a message arrives on one of the subscribed channels. That message
    will be returned and it's safe to start listening again.
    """

    PUBLISH_MESSAGE_TYPES = ("message", "pmessage", "smessage")
    UNSUBSCRIBE_MESSAGE_TYPES = ("unsubscribe", "punsubscribe", "sunsubscribe")
    HEALTH_CHECK_MESSAGE = "redis-py-health-check"

    def __init__(
        self,
        connection_pool: ConnectionPool,
        shard_hint: Optional[str] = None,
        ignore_subscribe_messages: bool = False,
        encoder=None,
        push_handler_func: Optional[Callable] = None,
        event_dispatcher: Optional["EventDispatcher"] = None,
    ):
        if event_dispatcher is None:
            self._event_dispatcher = EventDispatcher()
        else:
            self._event_dispatcher = event_dispatcher
        self.connection_pool = connection_pool
        self.shard_hint = shard_hint
        self.ignore_subscribe_messages = ignore_subscribe_messages
        self.connection = None
        # we need to know the encoding options for this connection in order
        # to look up channel and pattern names for callback handlers.
        self.encoder = encoder
        self.push_handler_func = push_handler_func
        if self.encoder is None:
            self.encoder = self.connection_pool.get_encoder()
        if self.encoder.decode_responses:
            self.health_check_response = [
                ["pong", self.HEALTH_CHECK_MESSAGE],
                self.HEALTH_CHECK_MESSAGE,
            ]
        else:
            self.health_check_response = [
                [b"pong", self.encoder.encode(self.HEALTH_CHECK_MESSAGE)],
                self.encoder.encode(self.HEALTH_CHECK_MESSAGE),
            ]
        if self.push_handler_func is None:
            _set_info_logger()
        self.channels = {}
        self.pending_unsubscribe_channels = set()
        self.patterns = {}
        self.pending_unsubscribe_patterns = set()
        self._lock = asyncio.Lock()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.aclose()

    def __del__(self):
        if self.connection:
            self.connection.deregister_connect_callback(self.on_connect)

    async def aclose(self):
        # In case a connection property does not yet exist
        # (due to a crash earlier in the Redis() constructor), return
        # immediately as there is nothing to clean up.
        if not hasattr(self, "connection"):
            return
        async with self._lock:
            if self.connection:
                await self.connection.disconnect()
                self.connection.deregister_connect_callback(self.on_connect)
                await self.connection_pool.release(self.connection)
                self.connection = None
            self.channels = {}
            self.pending_unsubscribe_channels = set()
            self.patterns = {}
            self.pending_unsubscribe_patterns = set()

    @deprecated_function(version="5.0.1", reason="Use aclose() instead", name="close")
    async def close(self) -> None:
        """Alias for aclose(), for backwards compatibility"""
        await self.aclose()

    @deprecated_function(version="5.0.1", reason="Use aclose() instead", name="reset")
    async def reset(self) -> None:
        """Alias for aclose(), for backwards compatibility"""
        await self.aclose()

    async def on_connect(self, connection: Connection):
        """Re-subscribe to any channels and patterns previously subscribed to"""
        # NOTE: for python3, we can't pass bytestrings as keyword arguments
        # so we need to decode channel/pattern names back to unicode strings
        # before passing them to [p]subscribe.
        #
        # However, channels subscribed without a callback (positional args) may
        # have binary names that are not valid in the current encoding (e.g.
        # arbitrary bytes that are not valid UTF-8). These channels are stored
        # with a ``None`` handler. We re-subscribe them as positional args so
        # that no decoding is required.
        self.pending_unsubscribe_channels.clear()
        self.pending_unsubscribe_patterns.clear()
        if self.channels:
            channels_with_handlers = {}
            channels_without_handlers = []
            for k, v in self.channels.items():
                if v is not None:
                    channels_with_handlers[self.encoder.decode(k, force=True)] = v
                else:
                    channels_without_handlers.append(k)
            if channels_with_handlers or channels_without_handlers:
                await self.subscribe(
                    *channels_without_handlers, **channels_with_handlers
                )
        if self.patterns:
            patterns_with_handlers = {}
            patterns_without_handlers = []
            for k, v in self.patterns.items():
                if v is not None:
                    patterns_with_handlers[self.encoder.decode(k, force=True)] = v
                else:
                    patterns_without_handlers.append(k)
            if patterns_with_handlers or patterns_without_handlers:
                await self.psubscribe(
                    *patterns_without_handlers, **patterns_with_handlers
                )

    @property
    def subscribed(self):
        """Indicates if there are subscriptions to any channels or patterns"""
        return bool(self.channels or self.patterns)

    async def execute_command(self, *args: EncodableT):
        """Execute a publish/subscribe command"""
        # NOTE: don't parse the response in this function -- it could pull a
        # legitimate message off the stack if the connection is already
        # subscribed to one or more channels
        await self.connect()
        connection = self.connection
        kwargs = {"check_health": not self.subscribed}
        await self._execute(connection, connection.send_command, *args, **kwargs)

    async def connect(self):
        """
        Ensure that the PubSub is connected
        """
        if self.connection is None:
            self.connection = await self.connection_pool.get_connection()
            # register a callback that re-subscribes to any channels we
            # were listening to when we were disconnected
            self.connection.register_connect_callback(self.on_connect)
        else:
            await self.connection.connect()
        if self.push_handler_func is not None:
            self.connection._parser.set_pubsub_push_handler(self.push_handler_func)

        self._event_dispatcher.dispatch(
            AfterPubSubConnectionInstantiationEvent(
                self.connection, self.connection_pool, ClientType.ASYNC, self._lock
            )
        )

    async def _reconnect(
        self,
        conn,
        error: Optional[BaseException] = None,
        failure_count: Optional[int] = None,
        start_time: Optional[float] = None,
        command_name: Optional[str] = None,
    ):
        """
        The supported exceptions are already checked in the
        retry object so we don't need to do it here.

        In this error handler we are trying to reconnect to the server.
        """
        if (
            error
            and failure_count is not None
            and failure_count <= conn.retry.get_retries()
        ):
            if command_name:
                await record_operation_duration(
                    command_name=command_name,
                    duration_seconds=time.monotonic() - start_time,
                    server_address=getattr(conn, "host", None),
                    server_port=getattr(conn, "port", None),
                    db_namespace=str(conn.db),
                    error=error,
                    retry_attempts=failure_count,
                )
        await conn.disconnect(error=error, failure_count=failure_count)
        await conn.connect()

    async def _execute(self, conn, command, *args, **kwargs):
        """
        Connect manually upon disconnection. If the Redis server is down,
        this will fail and raise a ConnectionError as desired.

        After reconnection, the ``on_connect`` callback should have been
        called by the connection to resubscribe us to any channels and
        patterns we were previously listening to.
        """
        if args:
            command_name = args[0]
        else:
            command_name = None

        # Start timing for observability
        start_time = time.monotonic()
        # Track actual retry attempts for error reporting
        actual_retry_attempts = 0

        def failure_callback(error, failure_count):
            nonlocal actual_retry_attempts
            actual_retry_attempts = failure_count
            return self._reconnect(
                conn, error, failure_count, start_time, command_name
            )

        try:
            response = await conn.retry.call_with_retry(
                lambda: command(*args, **kwargs),
                failure_callback,
                with_failure_count=True,
            )
            if command_name:
                await record_operation_duration(
                    command_name=command_name,
                    duration_seconds=time.monotonic() - start_time,
                    server_address=getattr(conn, "host", None),
                    server_port=getattr(conn, "port", None),
                    db_namespace=str(conn.db),
                )
            return response
        except Exception as e:
            await record_error_count(
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                network_peer_address=getattr(conn, "host", None),
                network_peer_port=getattr(conn, "port", None),
                error_type=e,
                retry_attempts=actual_retry_attempts,
                is_internal=False,
            )
            raise

    async def parse_response(self, block: bool = True, timeout: float = 0):
        """Parse the response from a publish/subscribe command"""
        conn = self.connection
        if conn is None:
            raise RuntimeError(
                "pubsub connection not set: "
                "did you forget to call subscribe() or psubscribe()?"
            )
        await self.check_health()
        if not conn.is_connected:
            await conn.connect()
        read_timeout = None if block else timeout
        response = await self._execute(
            conn,
            conn.read_response,
            timeout=read_timeout,
            disconnect_on_error=False,
            push_request=True,
        )
        if conn.health_check_interval and response in self.health_check_response:
            # ignore the health check message as the user might not expect it
            return None
        return response

    async def check_health(self):
        conn = self.connection
        if conn is None:
            raise RuntimeError(
                "pubsub connection not set: "
                "did you forget to call subscribe() or psubscribe()?"
            )
        if (
            conn.health_check_interval
            and asyncio.get_running_loop().time() > conn.next_health_check
        ):
            await conn.send_command(
                "PING", self.HEALTH_CHECK_MESSAGE, check_health=False
            )

    def _normalize_keys(self, data: _NormalizeKeysT) -> _NormalizeKeysT:
        """
        Normalize channel/pattern names to be either bytes or strings
        based on whether responses are automatically decoded. This saves us
        from coercing the value for each message coming in.
        """
        encode = self.encoder.encode
        decode = self.encoder.decode
        return {decode(encode(k)): v for k, v in data.items()}  # type: ignore[return-value] # noqa: E501

    async def psubscribe(self, *args: ChannelT, **kwargs: PubSubHandler):
        """
        Subscribe to channel patterns. Patterns supplied as keyword arguments
        expect a pattern name as the key and a callable as the value. A
        pattern's callable will be invoked automatically when a message is
        received on that pattern rather than producing a message via
        ``listen()``.
        """
        parsed_args = list_or_args((args[0],), args[1:]) if args else args
        new_patterns: Dict[ChannelT, PubSubHandler] = dict.fromkeys(parsed_args)
        # Mypy bug: https://github.com/python/mypy/issues/10970
        new_patterns.update(kwargs)  # type: ignore[arg-type]
        ret_val = await self.execute_command("PSUBSCRIBE", *new_patterns.keys())
        # update the patterns dict AFTER we send the command. we don't want to
        # subscribe twice to these patterns, once for the command and again
        # for the reconnection.
        new_patterns = self._normalize_keys(new_patterns)
        self.patterns.update(new_patterns)
        self.pending_unsubscribe_patterns.difference_update(new_patterns)
        return ret_val

    def punsubscribe(self, *args: ChannelT) -> Awaitable:
        """
        Unsubscribe from the supplied patterns. If empty, unsubscribe from
        all patterns.
        """
        patterns: Iterable[ChannelT]
        if args:
            parsed_args = list_or_args((args[0],), args[1:])
            patterns = self._normalize_keys(dict.fromkeys(parsed_args)).keys()
        else:
            parsed_args = []
            patterns = self.patterns
        self.pending_unsubscribe_patterns.update(patterns)
        return self.execute_command("PUNSUBSCRIBE", *parsed_args)

    async def subscribe(self, *args: ChannelT, **kwargs: Callable):
        """
        Subscribe to channels. Channels supplied as keyword arguments expect
        a channel name as the key and a callable as the value. A channel's
        callable will be invoked automatically when a message is received on
        that channel rather than producing a message via ``listen()`` or
        ``get_message()``.
        """
        parsed_args = list_or_args((args[0],), args[1:]) if args else ()
        new_channels = dict.fromkeys(parsed_args)
        # Mypy bug: https://github.com/python/mypy/issues/10970
        new_channels.update(kwargs)  # type: ignore[arg-type]
        ret_val = await self.execute_command("SUBSCRIBE", *new_channels.keys())
        # update the channels dict AFTER we send the command. we don't want to
        # subscribe twice to these channels, once for the command and again
        # for the reconnection.
        new_channels = self._normalize_keys(new_channels)
        self.channels.update(new_channels)
        self.pending_unsubscribe_channels.difference_update(new_channels)
        return ret_val

    def unsubscribe(self, *args) -> Awaitable:
        """
        Unsubscribe from the supplied channels. If empty, unsubscribe from
        all channels.
        """
        if args:
            parsed_args = list_or_args(args[0], args[1:])
            channels = self._normalize_keys(dict.fromkeys(parsed_args))
        else:
            parsed_args = []
            channels = self.channels
        self.pending_unsubscribe_channels.update(channels)
        return self.execute_command("UNSUBSCRIBE", *parsed_args)

    async def listen(self) -> AsyncIterator:
        """Listen for messages on channels this client has been subscribed to"""
        while self.subscribed:
            response = await self.handle_message(await self.parse_response(block=True))
            if response is not None:
                yield response

    async def get_message(
        self, ignore_subscribe_messages: bool = False, timeout: Optional[float] = 0.0
    ):
        """
        Get the next message if one is available, otherwise None.

        If timeout is specified, the system will wait for ``timeout`` seconds
        before returning. Timeout should be specified as a floating point
        number, or None to wait indefinitely.
        """
        response = await self.parse_response(block=(timeout is None), timeout=timeout)
        if response:
            return await self.handle_message(response, ignore_subscribe_messages)
        return None
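    # Usage sketch (illustrative only, not part of the library): poll for
    # messages on a channel. ``r`` is assumed to be an initialized ``Redis``
    # instance.
    #
    #     pubsub = r.pubsub()
    #     await pubsub.subscribe("news")
    #     message = await pubsub.get_message(
    #         ignore_subscribe_messages=True, timeout=1.0
    #     )
    #     if message is not None:
    #         print(message["channel"], message["data"])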
  1174. def ping(self, message=None) -> Awaitable[bool]:
  1175. """
  1176. Ping the Redis server to test connectivity.
  1177. Sends a PING command to the Redis server and returns True if the server
  1178. responds with "PONG".
  1179. """
  1180. args = ["PING", message] if message is not None else ["PING"]
  1181. return self.execute_command(*args)

    async def handle_message(self, response, ignore_subscribe_messages=False):
        """
        Parses a pub/sub message. If the channel or pattern was subscribed to
        with a message handler, the handler is invoked instead of a parsed
        message being returned.
        """
        if response is None:
            return None
        if isinstance(response, bytes):
            response = [b"pong", response] if response != b"PONG" else [b"pong", b""]
        message_type = str_if_bytes(response[0])
        if message_type == "pmessage":
            message = {
                "type": message_type,
                "pattern": response[1],
                "channel": response[2],
                "data": response[3],
            }
        elif message_type == "pong":
            message = {
                "type": message_type,
                "pattern": None,
                "channel": None,
                "data": response[1],
            }
        else:
            message = {
                "type": message_type,
                "pattern": None,
                "channel": response[1],
                "data": response[2],
            }

        if message_type in ["message", "pmessage"]:
            channel = str_if_bytes(message["channel"])
            await record_pubsub_message(
                direction=PubSubDirection.RECEIVE,
                channel=channel,
            )

        # if this is an unsubscribe message, remove it from memory
        if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
            if message_type == "punsubscribe":
                pattern = response[1]
                if pattern in self.pending_unsubscribe_patterns:
                    self.pending_unsubscribe_patterns.remove(pattern)
                    self.patterns.pop(pattern, None)
            else:
                channel = response[1]
                if channel in self.pending_unsubscribe_channels:
                    self.pending_unsubscribe_channels.remove(channel)
                    self.channels.pop(channel, None)

        if message_type in self.PUBLISH_MESSAGE_TYPES:
            # if there's a message handler, invoke it
            if message_type == "pmessage":
                handler = self.patterns.get(message["pattern"], None)
            else:
                handler = self.channels.get(message["channel"], None)
            if handler:
                if inspect.iscoroutinefunction(handler):
                    await handler(message)
                else:
                    handler(message)
                return None
        elif message_type != "pong":
            # this is a subscribe/unsubscribe message. ignore if we don't
            # want them
            if ignore_subscribe_messages or self.ignore_subscribe_messages:
                return None

        return message
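
    # Editor's note: an illustrative sketch (not part of the original module)
    # of handler-based consumption. When a channel is subscribed with a
    # handler, handle_message() invokes the handler with the parsed dict shown
    # above ({"type", "pattern", "channel", "data"}) instead of returning it.
    #
    #   >>> async def on_news(message):
    #   ...     print(message["channel"], message["data"])
    #   >>> await pubsub.subscribe(**{"news": on_news})
    #   >>> task = asyncio.create_task(pubsub.run())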

    async def run(
        self,
        *,
        exception_handler: Optional["PSWorkerThreadExcHandlerT"] = None,
        poll_timeout: float = 1.0,
        pubsub=None,
    ) -> None:
        """Process pub/sub messages using registered callbacks.

        This is the equivalent of :py:meth:`redis.PubSub.run_in_thread` in
        redis-py, but it is a coroutine. To launch it as a separate task, use
        ``asyncio.create_task``:

            >>> task = asyncio.create_task(pubsub.run())

        To shut it down, use asyncio cancellation:

            >>> task.cancel()
            >>> await task
        """
        for channel, handler in self.channels.items():
            if handler is None:
                raise PubSubError(f"Channel: '{channel}' has no handler registered")
        for pattern, handler in self.patterns.items():
            if handler is None:
                raise PubSubError(f"Pattern: '{pattern}' has no handler registered")

        await self.connect()
        while True:
            try:
                if pubsub is None:
                    await self.get_message(
                        ignore_subscribe_messages=True, timeout=poll_timeout
                    )
                else:
                    await pubsub.get_message(
                        ignore_subscribe_messages=True, timeout=poll_timeout
                    )
            except asyncio.CancelledError:
                raise
            except BaseException as e:
                if exception_handler is None:
                    raise
                res = exception_handler(e, self)
                if inspect.isawaitable(res):
                    await res
            # Ensure that other tasks on the event loop get a chance to run
            # if we didn't have to block for I/O anywhere.
            await asyncio.sleep(0)


class PubsubWorkerExceptionHandler(Protocol):
    def __call__(self, e: BaseException, pubsub: PubSub): ...


class AsyncPubsubWorkerExceptionHandler(Protocol):
    async def __call__(self, e: BaseException, pubsub: PubSub): ...


PSWorkerThreadExcHandlerT = Union[
    PubsubWorkerExceptionHandler, AsyncPubsubWorkerExceptionHandler
]
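
# Editor's note: an illustrative exception handler (not part of the original
# module) matching PSWorkerThreadExcHandlerT. PubSub.run() awaits the result
# if the handler returns an awaitable, so either form of the protocol works;
# ``log`` is assumed to be a pre-configured ``logging.Logger``.
#
#   >>> def log_pubsub_error(e: BaseException, pubsub: PubSub) -> None:
#   ...     log.warning("pubsub worker error: %r", e)
#   >>> task = asyncio.create_task(pubsub.run(exception_handler=log_pubsub_error))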

CommandT = Tuple[Tuple[Union[str, bytes], ...], Mapping[str, Any]]
CommandStackT = List[CommandT]


class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
    """
    Pipelines provide a way to transmit multiple commands to the Redis server
    in one transmission. This is convenient for batch processing, such as
    saving all the values in a list to Redis.

    All commands executed within a pipeline (when running in transactional
    mode, which is the default behavior) are wrapped with MULTI and EXEC
    calls. This guarantees all commands executed in the pipeline will be
    executed atomically.

    Any command raising an exception does *not* halt the execution of
    subsequent commands in the pipeline. Instead, the exception is caught
    and its instance is placed into the response list returned by execute().
    Code iterating over the response list should be able to deal with an
    instance of an exception as a potential value. In general, these will be
    ResponseError exceptions, such as those raised when issuing a command
    on a key of a different datatype.
    """

    UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}

    def __init__(
        self,
        connection_pool: ConnectionPool,
        response_callbacks: MutableMapping[Union[str, bytes], ResponseCallbackT],
        transaction: bool,
        shard_hint: Optional[str],
    ):
        self.connection_pool = connection_pool
        self.connection = None
        self.response_callbacks = response_callbacks
        self.is_transaction = transaction
        self.shard_hint = shard_hint
        self.watching = False
        self.command_stack: CommandStackT = []
        self.scripts: Set[Script] = set()
        self.explicit_transaction = False

    async def __aenter__(self: _RedisT) -> _RedisT:
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.reset()

    def __await__(self):
        return self._async_self().__await__()

    _DEL_MESSAGE = "Unclosed Pipeline client"

    def __len__(self):
        return len(self.command_stack)

    def __bool__(self):
        """Pipeline instances should always evaluate to True"""
        return True

    async def _async_self(self):
        return self

    async def reset(self):
        self.command_stack = []
        self.scripts = set()
        # make sure to reset the connection state in the event that we were
        # watching something
        if self.watching and self.connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                await self.connection.send_command("UNWATCH")
                await self.connection.read_response()
            except ConnectionError:
                # disconnect will also remove any previous WATCHes
                if self.connection:
                    await self.connection.disconnect()
        # clean up the other instance attributes
        self.watching = False
        self.explicit_transaction = False
        # we can safely return the connection to the pool here since we're
        # sure we're no longer WATCHing anything
        if self.connection:
            await self.connection_pool.release(self.connection)
            self.connection = None

    async def aclose(self) -> None:
        """Alias for reset(), a standard method name for cleanup"""
        await self.reset()

    def multi(self):
        """
        Start a transactional block of the pipeline after WATCH commands
        are issued. End the transactional block with `execute`.
        """
        if self.explicit_transaction:
            raise RedisError("Cannot issue nested calls to MULTI")
        if self.command_stack:
            raise RedisError(
                "Commands without an initial WATCH have already been issued"
            )
        self.explicit_transaction = True

    def execute_command(
        self, *args, **kwargs
    ) -> Union["Pipeline", Awaitable["Pipeline"]]:
        if (self.watching or args[0] == "WATCH") and not self.explicit_transaction:
            return self.immediate_execute_command(*args, **kwargs)
        return self.pipeline_execute_command(*args, **kwargs)

    async def _disconnect_reset_raise_on_watching(
        self,
        conn: Connection,
        error: Exception,
        failure_count: Optional[int] = None,
        start_time: Optional[float] = None,
        command_name: Optional[str] = None,
    ) -> None:
        """
        Close the connection, reset the watching state, and raise an
        exception if we were watching.

        The supported exceptions are already checked in the retry object,
        so we don't need to do it here.

        After we disconnect the connection, it will try to reconnect and
        do a health check as part of the send_command logic (at the
        connection level).
        """
        if (
            error
            and failure_count is not None
            and failure_count <= conn.retry.get_retries()
        ):
            await record_operation_duration(
                command_name=command_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
                error=error,
                retry_attempts=failure_count,
            )
        await conn.disconnect(error=error, failure_count=failure_count)
        # if we were already watching a variable, the watch is no longer
        # valid since this connection has died. raise a WatchError, which
        # indicates the user should retry this transaction.
        if self.watching:
            await self.reset()
            raise WatchError(
                f"A {type(error).__name__} occurred while watching one or more keys"
            )

    async def immediate_execute_command(self, *args, **options):
        """
        Execute a command immediately, but don't auto-retry on the supported
        errors if we're already WATCHing a variable. Used when issuing WATCH
        or subsequent commands that retrieve their values, but before MULTI
        is called.
        """
        command_name = args[0]
        conn = self.connection
        # if this is the first call, we need a connection
        if not conn:
            conn = await self.connection_pool.get_connection()
            self.connection = conn

        # Start timing for observability
        start_time = time.monotonic()
        # Track actual retry attempts for error reporting
        actual_retry_attempts = 0

        def failure_callback(error, failure_count):
            nonlocal actual_retry_attempts
            actual_retry_attempts = failure_count
            return self._disconnect_reset_raise_on_watching(
                conn, error, failure_count, start_time, command_name
            )

        try:
            response = await conn.retry.call_with_retry(
                lambda: self._send_command_parse_response(
                    conn, command_name, *args, **options
                ),
                failure_callback,
                with_failure_count=True,
            )
            await record_operation_duration(
                command_name=command_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
            )
            return response
        except Exception as e:
            await record_error_count(
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                network_peer_address=getattr(conn, "host", None),
                network_peer_port=getattr(conn, "port", None),
                error_type=e,
                retry_attempts=actual_retry_attempts,
                is_internal=False,
            )
            raise

    def pipeline_execute_command(self, *args, **options):
        """
        Stage a command to be executed when execute() is next called

        Returns the current Pipeline object back so commands can be
        chained together, such as:

            pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')

        At some other point, you can then run: pipe.execute(),
        which will execute all commands queued in the pipe.
        """
        self.command_stack.append((args, options))
        return self

    async def _execute_transaction(  # noqa: C901
        self, connection: Connection, commands: CommandStackT, raise_on_error
    ):
        pre: CommandT = (("MULTI",), {})
        post: CommandT = (("EXEC",), {})
        cmds = (pre, *commands, post)
        all_cmds = connection.pack_commands(
            args for args, options in cmds if EMPTY_RESPONSE not in options
        )
        await connection.send_packed_command(all_cmds)
        errors = []

        # parse off the response for MULTI
        # NOTE: we need to handle ResponseErrors here and continue
        # so that we read all the additional command messages from
        # the socket
        try:
            await self.parse_response(connection, "_")
        except ResponseError as err:
            errors.append((0, err))

        # and all the other commands
        for i, command in enumerate(commands):
            if EMPTY_RESPONSE in command[1]:
                errors.append((i, command[1][EMPTY_RESPONSE]))
            else:
                try:
                    await self.parse_response(connection, "_")
                except ResponseError as err:
                    self.annotate_exception(err, i + 1, command[0])
                    errors.append((i, err))

        # parse the EXEC.
        try:
            response = await self.parse_response(connection, "_")
        except ExecAbortError as err:
            if errors:
                raise errors[0][1] from err
            raise

        # EXEC clears any watched keys
        self.watching = False

        if response is None:
            raise WatchError("Watched variable changed.") from None

        # put any parse errors into the response
        for i, e in errors:
            response.insert(i, e)

        if len(response) != len(commands):
            if self.connection:
                await self.connection.disconnect()
            raise ResponseError(
                "Wrong number of response items from pipeline execution"
            ) from None

        # find any errors in the response and raise if necessary
        if raise_on_error:
            self.raise_first_error(commands, response)

        # We have to run response callbacks manually
        data = []
        for r, cmd in zip(response, commands):
            if not isinstance(r, Exception):
                args, options = cmd
                command_name = args[0]
                # Remove the "keys" entry; it is only needed for client-side caching.
                options.pop("keys", None)
                if command_name in self.response_callbacks:
                    r = self.response_callbacks[command_name](r, **options)
                    if inspect.isawaitable(r):
                        r = await r
            data.append(r)
        return data
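
    # Editor's note (not part of the original module): in the transactional
    # path above, the wire exchange is MULTI -> "+OK", one "+QUEUED" reply per
    # queued command, then EXEC returning an array with one reply per command
    # (or a null reply if a WATCHed key changed, which surfaces here as
    # WatchError). The loop over ``commands`` exists to drain every
    # "+QUEUED"/error reply from the socket even when some commands were
    # rejected at queue time.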

    async def _execute_pipeline(
        self, connection: Connection, commands: CommandStackT, raise_on_error: bool
    ):
        # build up all commands into a single request to increase network perf
        all_cmds = connection.pack_commands([args for args, _ in commands])
        await connection.send_packed_command(all_cmds)

        response = []
        for args, options in commands:
            try:
                response.append(
                    await self.parse_response(connection, args[0], **options)
                )
            except ResponseError as e:
                response.append(e)

        if raise_on_error:
            self.raise_first_error(commands, response)

        return response

    def raise_first_error(self, commands: CommandStackT, response: Iterable[Any]):
        for i, r in enumerate(response):
            if isinstance(r, ResponseError):
                self.annotate_exception(r, i + 1, commands[i][0])
                raise r

    def annotate_exception(
        self, exception: Exception, number: int, command: Iterable[object]
    ) -> None:
        cmd = " ".join(map(safe_str, command))
        msg = (
            f"Command # {number} ({truncate_text(cmd)}) "
            f"of pipeline caused error: {exception.args}"
        )
        exception.args = (msg,) + exception.args[1:]

    async def parse_response(
        self, connection: Connection, command_name: Union[str, bytes], **options
    ):
        result = await super().parse_response(connection, command_name, **options)
        if command_name in self.UNWATCH_COMMANDS:
            self.watching = False
        elif command_name == "WATCH":
            self.watching = True
        return result

    async def load_scripts(self):
        # make sure all scripts that are about to be run on this pipeline exist
        scripts = list(self.scripts)
        immediate = self.immediate_execute_command
        shas = [s.sha for s in scripts]
        # we can't use the normal script_* methods because they would just
        # get buffered in the pipeline.
        exists = await immediate("SCRIPT EXISTS", *shas)
        if not all(exists):
            for s, exist in zip(scripts, exists):
                if not exist:
                    s.sha = await immediate("SCRIPT LOAD", s.script)

    async def _disconnect_raise_on_watching(
        self,
        conn: Connection,
        error: Exception,
        failure_count: Optional[int] = None,
        start_time: Optional[float] = None,
        command_name: Optional[str] = None,
    ):
        """
        Close the connection and raise an exception if we were watching.

        The supported exceptions are already checked in the retry object,
        so we don't need to do it here.

        After we disconnect the connection, it will try to reconnect and
        do a health check as part of the send_command logic (at the
        connection level).
        """
        if (
            error
            and failure_count is not None
            and failure_count <= conn.retry.get_retries()
        ):
            await record_operation_duration(
                command_name=command_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
                error=error,
                retry_attempts=failure_count,
            )
        await conn.disconnect(error=error, failure_count=failure_count)
        # if we were watching a variable, the watch is no longer valid
        # since this connection has died. raise a WatchError, which
        # indicates the user should retry this transaction.
        if self.watching:
            raise WatchError(
                f"A {type(error).__name__} occurred while watching one or more keys"
            )

    async def execute(self, raise_on_error: bool = True) -> List[Any]:
        """Execute all the commands in the current pipeline"""
        stack = self.command_stack
        if not stack and not self.watching:
            return []
        if self.scripts:
            await self.load_scripts()
        if self.is_transaction or self.explicit_transaction:
            execute = self._execute_transaction
            operation_name = "MULTI"
        else:
            execute = self._execute_pipeline
            operation_name = "PIPELINE"

        conn = self.connection
        if not conn:
            conn = await self.connection_pool.get_connection()
            # assign to self.connection so reset() releases the connection
            # back to the pool after we're done
            self.connection = conn
        conn = cast(Connection, conn)

        # Start timing for observability
        start_time = time.monotonic()
        # Track actual retry attempts for error reporting
        actual_retry_attempts = 0

        def failure_callback(error, failure_count):
            nonlocal actual_retry_attempts
            actual_retry_attempts = failure_count
            return self._disconnect_raise_on_watching(
                conn, error, failure_count, start_time, operation_name
            )

        try:
            response = await conn.retry.call_with_retry(
                lambda: execute(conn, stack, raise_on_error),
                failure_callback,
                with_failure_count=True,
            )
            await record_operation_duration(
                command_name=operation_name,
                duration_seconds=time.monotonic() - start_time,
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                db_namespace=str(conn.db),
            )
            return response
        except Exception as e:
            await record_error_count(
                server_address=getattr(conn, "host", None),
                server_port=getattr(conn, "port", None),
                network_peer_address=getattr(conn, "host", None),
                network_peer_port=getattr(conn, "port", None),
                error_type=e,
                retry_attempts=actual_retry_attempts,
                is_internal=False,
            )
            raise
        finally:
            await self.reset()
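
    # Editor's note: an illustrative sketch (not part of the original module)
    # of execute(raise_on_error=False); failed commands come back as exception
    # instances in the result list rather than being raised, so callers should
    # check each entry. Assumes ``r`` is a connected Redis client.
    #
    #   >>> pipe = r.pipeline()
    #   >>> pipe.set("foo", "bar").lpush("foo", "oops")   # LPUSH on a string key
    #   >>> results = await pipe.execute(raise_on_error=False)
    #   >>> # per the class docstring above, results[1] is expected to be a
    #   >>> # ResponseError instance rather than an exception being raised.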

    async def discard(self):
        """Flushes all previously queued commands
        See: https://redis.io/commands/DISCARD
        """
        await self.execute_command("DISCARD")

    async def watch(self, *names: KeyT):
        """Watches the values at keys ``names``"""
        if self.explicit_transaction:
            raise RedisError("Cannot issue a WATCH after a MULTI")
        return await self.execute_command("WATCH", *names)

    async def unwatch(self):
        """Unwatches all previously specified keys"""
        return self.watching and await self.execute_command("UNWATCH") or True
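
    # Editor's note: an illustrative optimistic-locking loop (not part of the
    # original module) combining watch(), multi() and execute(). A concurrent
    # write to a WATCHed key makes execute() raise WatchError, and the caller
    # simply retries. Assumes ``r`` is a connected Redis client.
    #
    #   >>> async with r.pipeline() as pipe:
    #   ...     while True:
    #   ...         try:
    #   ...             await pipe.watch("counter")
    #   ...             current = int(await pipe.get("counter") or 0)
    #   ...             pipe.multi()
    #   ...             pipe.set("counter", current + 1)
    #   ...             await pipe.execute()
    #   ...             break
    #   ...         except WatchError:
    #   ...             continue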