sushichao před 2 dny
rodič
revize
42adc254e2
100 změnil soubory, kde provedl 14466 přidání a 35 odebrání
  1. binární
      app/.DS_Store
  2. 8 0
      app/ispeaker-service/LICENSE
  3. 3 0
      app/ispeaker-service/README.md
  4. 56 0
      app/ispeaker-service/app/__init__.py
  5. 88 0
      app/ispeaker-service/app/app_config.py
  6. 145 0
      app/ispeaker-service/app/config.py
  7. 1280 0
      app/ispeaker-service/app/device.py
  8. 32 0
      app/ispeaker-service/app/log.py
  9. 506 0
      app/ispeaker-service/app/multicast_player.py
  10. 219 0
      app/ispeaker-service/app/player.py
  11. 53 0
      app/ispeaker-service/app/redisSub.py
  12. 69 0
      app/ispeaker-service/app/register.py
  13. 312 0
      app/ispeaker-service/app/views.py
  14. 27 0
      app/ispeaker-service/bin/ispeakerctl
  15. binární
      app/ispeaker-service/bin/rtsp_pull_push
  16. 32 0
      app/ispeaker-service/ispeaker
  17. 3 0
      app/ispeaker-service/requirements.txt
  18. binární
      bin/rk_parser
  19. 11 0
      etc/rc.local
  20. 1 12
      etc/scripts/getmodel.sh
  21. 68 0
      etc/scripts/ispeaker
  22. 1 0
      etc/scripts/upgrade.sh
  23. 10 0
      etc/scripts/watch_process.sh
  24. 7 20
      oem/etc/speaker.conf
  25. 5 3
      readyForTarget.sh
  26. 1 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/INSTALLER
  27. 13 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/LICENSE
  28. 163 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/METADATA
  29. 10 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/RECORD
  30. 5 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/WHEEL
  31. 1 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/top_level.txt
  32. 1 0
      usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/zip-safe
  33. 276 0
      usr/local/lib/python3.10/dist-packages/async_timeout/__init__.py
  34. binární
      usr/local/lib/python3.10/dist-packages/async_timeout/__pycache__/__init__.cpython-310.pyc
  35. 1 0
      usr/local/lib/python3.10/dist-packages/async_timeout/py.typed
  36. 1 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/INSTALLER
  37. 271 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/METADATA
  38. 231 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/RECORD
  39. 0 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/REQUESTED
  40. 4 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/WHEEL
  41. 21 0
      usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/licenses/LICENSE
  42. 91 0
      usr/local/lib/python3.10/dist-packages/redis/__init__.py
  43. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/__init__.cpython-310.pyc
  44. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/background.cpython-310.pyc
  45. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/backoff.cpython-310.pyc
  46. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/cache.cpython-310.pyc
  47. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/client.cpython-310.pyc
  48. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/cluster.cpython-310.pyc
  49. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/connection.cpython-310.pyc
  50. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/crc.cpython-310.pyc
  51. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/credentials.cpython-310.pyc
  52. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/data_structure.cpython-310.pyc
  53. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/driver_info.cpython-310.pyc
  54. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/event.cpython-310.pyc
  55. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/exceptions.cpython-310.pyc
  56. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/lock.cpython-310.pyc
  57. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/maint_notifications.cpython-310.pyc
  58. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/ocsp.cpython-310.pyc
  59. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/retry.cpython-310.pyc
  60. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/sentinel.cpython-310.pyc
  61. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/typing.cpython-310.pyc
  62. binární
      usr/local/lib/python3.10/dist-packages/redis/__pycache__/utils.cpython-310.pyc
  63. 27 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__init__.py
  64. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/__init__.cpython-310.pyc
  65. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/base.cpython-310.pyc
  66. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/commands.cpython-310.pyc
  67. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/encoders.cpython-310.pyc
  68. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/helpers.cpython-310.pyc
  69. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/hiredis.cpython-310.pyc
  70. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/resp2.cpython-310.pyc
  71. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/resp3.cpython-310.pyc
  72. binární
      usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/socket.cpython-310.pyc
  73. 565 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/base.py
  74. 692 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/commands.py
  75. 44 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/encoders.py
  76. 947 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/helpers.py
  77. 302 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/hiredis.py
  78. 132 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/resp2.py
  79. 270 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/resp3.py
  80. 162 0
      usr/local/lib/python3.10/dist-packages/redis/_parsers/socket.py
  81. 64 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__init__.py
  82. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/__init__.cpython-310.pyc
  83. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/client.cpython-310.pyc
  84. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/cluster.cpython-310.pyc
  85. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/connection.cpython-310.pyc
  86. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/lock.cpython-310.pyc
  87. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/retry.cpython-310.pyc
  88. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/sentinel.cpython-310.pyc
  89. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/utils.cpython-310.pyc
  90. 1951 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/client.py
  91. 2957 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/cluster.py
  92. 1717 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/connection.py
  93. 0 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__init__.py
  94. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__pycache__/__init__.cpython-310.pyc
  95. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__pycache__/http_client.cpython-310.pyc
  96. 265 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/http/http_client.py
  97. 345 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/lock.py
  98. 0 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__init__.py
  99. binární
      usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__pycache__/__init__.cpython-310.pyc
  100. 0 0
      usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__pycache__/client.cpython-310.pyc

binární
app/.DS_Store


+ 8 - 0
app/ispeaker-service/LICENSE

@@ -0,0 +1,8 @@
+MIT License
+Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 3 - 0
app/ispeaker-service/README.md

@@ -0,0 +1,3 @@
+# ispeaker
+
+该项目为广播系统服务器控制ispeaker的客户端程序

+ 56 - 0
app/ispeaker-service/app/__init__.py

@@ -0,0 +1,56 @@
+from app.app_config import SPEAKER_CONFIG_FILE
+import configparser
+import sys
+
+def singleton(cls, *args, **kwargs):
+    instances = {}
+    
+    def _singleton():
+        if cls not in instances:
+            instances[cls] = cls(*args, **kwargs)
+        return instances[cls]
+    return _singleton
+
+from app.log import Logger
+"""
+    desc:
+        日志实例初始化
+ 
+    Parameters:
+ 
+    Returns:
+        返回日志一个日志实例
+"""
+log = Logger()
+
+"""
+    desc:
+        配置解析器
+"""
+app_enanle_cf = configparser.RawConfigParser()
+app_enanle_cf.read(SPEAKER_CONFIG_FILE, encoding='utf-8')
+
+"""
+    desc:
+        启动开关读取
+"""
+
+multicast_enanle = app_enanle_cf.get("multicast_player", "enable")
+
+if multicast_enanle != 'yes':
+    log.logger.warning("Multicast service disbaled!")
+    sys.exit(0)
+
+from app.redisSub import RedisHelper
+redisObj = RedisHelper()
+
+project_cf = configparser.RawConfigParser()
+speaker_cf = configparser.RawConfigParser()
+volume_cf = configparser.RawConfigParser()
+
+if multicast_enanle == 'yes':
+    from app.config import MulticastConfig
+    multicastConfig = MulticastConfig()
+
+    from app.multicast_player import MulticastPlayer
+    multicast_palyer = MulticastPlayer()

+ 88 - 0
app/ispeaker-service/app/app_config.py

@@ -0,0 +1,88 @@
+#每个日志文件的大小
+PER_LOG_FILESIZE = 1024000 * 2
+#最多保留的日志文件个数
+LOG_FILE_NUM = 2
+#日志文件路径
+LOG_PATH = "/userdata/ispeaker.log"
+#系统信息配置文件
+SPEAKER_CONFIG_FILE = "/etc/speaker.conf"
+#音量信息配置文件
+VOLUME_CONFIG_FILE = "/oem/etc/volctrl.conf"
+#mqtt服务器端口号
+SERVER_MQTT_PORT = 1883
+#mqtt指令主题名
+TOPIC_COMMAND = "operation"
+#mqtt响应主题名
+TOPIC_RESPONSE = "response"
+#设备状态改变上报主题
+TOPIC_UPDATE = "update"
+#设备事件主题
+TOPIC_EVENT = "event"
+ID = "id"
+ACTION_NAME = "action"
+STATUS = "statusText"
+SOLF_VOLUME = "soft-volume"
+HARD_VOLUME = "hard-volume"
+HARD_VOLUME_CONTROL = "hard-volume-control"
+PRIVATE_IP = "private_ip"
+ERROR = "error"
+PLAY = "play"
+PAUSE = "pause"
+RESUME = "resume"
+STOP = "stop"
+SET_EXTEN = "set-exten"
+DATA = "data"
+VOLUME = "volume"
+URL = "url"
+HOST = "host"
+EXTEN = "exten"
+PASSWORD = "password"
+MQTT_QOS = 1
+PLAYER_IDLE = "idle"
+PLAYER_PLAYING = "playing"
+PLAYER_PAUSE = "pause"
+PLAYER_ERROR = "error"
+SLEEP_SEC = 0.5
+SET_SOLF_VOLUME = "set-soft-volume"
+SET_HARD_VOLUME = "set-hard-volume"
+MQTT_CONNECT_TIMEOUT = 5
+RETRY_SEC = 3
+DEVICE_MODEL = "model"
+#项目配置文件,用于保存一些运行时变量,如音量,播放状态
+PROJECT_CONFIG = "/etc/ispeaker.conf"
+#各状态检测事件间隔
+CHECK_INTERVAL = 2
+SET_HARD_VOLUME_CONTROL = "set-hard-volume-control"
+VALUE = "value"
+ON = "on"
+OFF = "off"
+#libvlc c库的日志级别
+VLC_DEBUG_LEVEL = "DEBUG"
+VLC_BUF_LEN = 1024
+#多少各关心的警告日志出现就会重播源
+VLC_WARNING_COUNT = 6
+#多少各关心的错误日志出现就会重播源
+VLC_ERROR_COUNT = 100 
+VLC_SOURCE_ERROR_COUNT = 5
+SET_SERVICE_SETTINGS = "set_service_settings"
+#rtsp推送执行程序
+RTSP_PUSHER = "/app/ispeaker-service/bin/rtsp_pull_push"
+FIFO_PATH = "/userdata/ispeaker.fifo"
+SERVER_ICE_PORT = "8001"
+BUFFER_LATE_INTERVAL = 8
+# 组播地址个数
+ADDRESS_COUNT = 9
+# 地址
+ANY = "0.0.0.0"
+# 组播超时计时器
+TIME_COUNTER = 3
+# 播放器默认音量
+PLAYER_VOLUME = 90
+# 最大线程数量
+MAX_WORKERS = 12
+#speaker事件通道
+EVENTCHSUB  = "volume-event-channel"
+#音量接收通道
+VALUECHPUB  = "volume-value-channel"
+#设置单个音量的通道
+WEBCHPUB  = "volume-web-set-channel"

+ 145 - 0
app/ispeaker-service/app/config.py

@@ -0,0 +1,145 @@
+import os
+import uuid
+import socket
+from app.app_config import SPEAKER_CONFIG_FILE, PROJECT_CONFIG, VOLUME_CONFIG_FILE, ADDRESS_COUNT
+from app import project_cf
+from app import speaker_cf
+from app import volume_cf
+
+class Config(object):
+    def __init__(self):
+        speaker_cf.read(SPEAKER_CONFIG_FILE, encoding='utf-8')
+        volume_cf.read(VOLUME_CONFIG_FILE, encoding='utf-8')
+        project_cf.read(PROJECT_CONFIG, encoding='utf-8')
+        #设置型号
+        self.model = speaker_cf.get("system", "model")
+        #系统软件版本号
+        self.soft_version = speaker_cf.get("system", "firmware")
+        #系统硬件版本号
+        self.hard_version = speaker_cf.get("system", "hard_version")
+        #系统主机名
+        self.hostname = socket.getfqdn(socket.gethostname())
+        #系统ip地址
+        self.ipaddr = self._get_host_ip()
+        #系统硬件音量
+        self.hard_volume = int(volume_cf.get("volume", "volume_out"))
+        #系统强控开关
+        self.hard_volume_control = volume_cf.get("volume", "hard_volume_control")
+        #系统分机号
+        self.exten = speaker_cf.get("account_info_1", "username")
+        #系统分机密码
+        self.exten_password = speaker_cf.get("account_info_1", "passwd")
+        #广播系统服务器地址
+        self.server_ipaddr = speaker_cf.get("account_info_1", "server")
+        #播放器初始化音量,播放器初始化状态,播放源
+
+        #重启时,是否恢复播放
+        self.music_auto_resume = speaker_cf.get("system", "music_auto_resume")
+        if self.music_auto_resume == "yes":
+            if project_cf.has_option("general", "init_player_state") and project_cf.has_option("general", "current_uri"):
+                self.player_current_volume = int(volume_cf.get("volume", "broadcast_volume"))
+                self.init_player_state = int(project_cf.get("general", "init_player_state"))
+                self.current_uri = project_cf.get("general", "current_uri")
+            else:
+                if project_cf.has_section("general"):
+                    volume_cf.set("volume", "broadcast_volume", "50")
+                    project_cf.set("general", "init_player_state", "-1")
+                    project_cf.set("general", "current_uri", "")
+                else:
+                    project_cf.add_section("general")
+                    volume_cf.set("volume", "broadcast_volume", "50")
+                    project_cf.set("general", "init_player_state", "-1")
+                    project_cf.set("general", "current_uri", "")
+
+                with open(PROJECT_CONFIG, "w") as f:
+                    project_cf.write(f, space_around_delimiters=False)
+                
+                with open(VOLUME_CONFIG_FILE, "w") as f:
+                    volume_cf.write(f, space_around_delimiters=False)
+                os.system("sync")
+
+                self.player_current_volume = int(volume_cf.get("volume", "broadcast_volume"))
+                self.init_player_state = int(project_cf.get("general", "init_player_state"))
+                self.current_uri = project_cf.get("general", "current_uri")
+        else:
+            if project_cf.has_option("general", "init_player_state") and project_cf.has_option("general", "current_uri"):
+                project_cf.set("general", "init_player_state", "-1")
+                project_cf.set("general", "current_uri", "")
+
+                with open(PROJECT_CONFIG, "w") as f:
+                    project_cf.write(f, space_around_delimiters=False)
+                os.system("sync")
+
+                self.player_current_volume = int(volume_cf.get("volume", "broadcast_volume"))
+                self.init_player_state = int(project_cf.get("general", "init_player_state"))
+                self.current_uri = project_cf.get("general", "current_uri")
+            else:
+                if project_cf.has_section("general"):
+                    volume_cf.set("volume", "broadcast_volume", "50")
+                    project_cf.set("general", "init_player_state", "-1")
+                    project_cf.set("general", "current_uri", "")
+                else:
+                    project_cf.add_section("general")
+                    volume_cf.set("volume", "broadcast_volume", "50")
+                    project_cf.set("general", "init_player_state", "-1")
+                    project_cf.set("general", "current_uri", "")
+
+                with open(PROJECT_CONFIG, "w") as f:
+                    project_cf.write(f, space_around_delimiters=False)
+                os.system("sync")
+
+                self.player_current_volume = int(volume_cf.get("volume", "broadcast_volume"))
+                self.init_player_state = int(project_cf.get("general", "init_player_state"))
+                self.current_uri = project_cf.get("general", "current_uri")
+
+        #设置mac
+        self.mac = self._get_mac_address()
+
+    """
+        desc:
+            获取设备mac地址
+    """
+    def _get_mac_address(self): 
+        mac=uuid.UUID(int = uuid.getnode()).hex[-12:] 
+        return ":".join([mac[e:e+2] for e in range(0,11,2)]).replace(":", "").lower()
+
+    """
+        desc:
+            获取设置出局ip地址
+    """
+    def _get_host_ip(self):
+        try:
+            s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
+            s.connect(('8.8.8.8',80))
+            ip=s.getsockname()[0]
+        finally:
+            s.close()
+        return ip
+    
+class MulticastConfig(object):
+    def __init__(self):
+        speaker_cf.read(SPEAKER_CONFIG_FILE, encoding='utf-8')
+        volume_cf.read(VOLUME_CONFIG_FILE, encoding='utf-8')
+        self.network_caching = int(speaker_cf.get("multicast_player", "network_caching"))
+        self.address = []
+        for i in range(1, ADDRESS_COUNT + 1):
+            item = {}
+            try:
+                col = speaker_cf.get("multicast_player", "address"+str(i))
+                gpio = speaker_cf.get("multicast_player", "gpio"+str(i))
+                volume = int(volume_cf.get("volume", "multicast_"+str(i)+"_volume"))
+            except:
+                continue
+            if col:
+                item["id"] = i
+                item["priority"] = i
+                item["update_status"] = 0
+                item["player_status"] = 0
+                item["available_status"] = 0
+                item["counter"] = 0
+                item["url"] = col
+                item["ip"] = col.split("@", 1)[1].lstrip().rstrip().split(":", 1)[0]
+                item["port"] = int(col.split("@", 1)[1].lstrip().rstrip().split(":", 1)[1])
+                item["gpio"] = gpio
+                item["volume"] = volume
+                self.address.append(item)

Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 1280 - 0
app/ispeaker-service/app/device.py


+ 32 - 0
app/ispeaker-service/app/log.py

@@ -0,0 +1,32 @@
+import logging
+from logging import handlers
+from app.app_config import PER_LOG_FILESIZE,LOG_FILE_NUM,LOG_PATH
+
+"""
+    desc:
+        系统日志类
+"""
+class Logger(object):
+    level_relations = {
+        'debug':logging.DEBUG,
+        'info':logging.INFO,
+        'warning':logging.WARNING,
+        'error':logging.ERROR,
+        'crit':logging.CRITICAL
+    }
+
+    def __init__(self,filename = LOG_PATH, level = 'info', when = 'D',backCount = LOG_FILE_NUM, \
+        fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', filesize = PER_LOG_FILESIZE):
+        self.logger = logging.getLogger(filename)
+        format_str = logging.Formatter(fmt)
+        self.logger.setLevel(self.level_relations.get(level))
+        sh = logging.StreamHandler()
+        sh.setFormatter(format_str)
+        th = handlers.RotatingFileHandler(
+              filename = filename,
+              maxBytes=filesize,
+              backupCount=backCount,
+              encoding = 'utf-8')
+        th.setFormatter(format_str)
+        self.logger.addHandler(sh)
+        self.logger.addHandler(th)

+ 506 - 0
app/ispeaker-service/app/multicast_player.py

@@ -0,0 +1,506 @@
+# multicast_player_optimized.py
+import time
+import json
+import socket
+import threading
+import struct
+from time import sleep
+from queue import Queue, Empty
+from concurrent.futures import ThreadPoolExecutor, TimeoutError
+
+from app import multicastConfig, log, redisObj
+from app.app_config import *
+from app.player import VLCController
+
+# Constants:
+PLAY_TIMEOUT = 2.0  # 原来 timeout(2.0)
+REDIS_QUEUE_MAXSIZE = 1024
+MULTICAST_RETRY_DELAY = 1.0  # socket 异常后重试间隔(秒)
+
+
+class MulticastPlayer(object):
+    def __init__(self, config=multicastConfig):
+        # config
+        self.network_caching = config.network_caching
+        self.address = config.address  # list of dicts with keys id, ip, port, url, priority, volume, gpio...
+        self.player_volume = PLAYER_VOLUME
+
+        # runtime state
+        self.priority = 0
+        self.playing_id = 0
+        self.current_uri = ""
+        self.currentScreenName = ""
+
+        # locks
+        self.status_lock = threading.Lock()
+        self.priority_lock = threading.Lock()
+        self.multicast_volume_lock = threading.Lock()
+
+        # available items map (id -> item dict)
+        self.available_item = {}
+        for item in self.address:
+            # ensure expected keys exist and have defaults
+            item.setdefault("update_status", 0)
+            item.setdefault("available_status", 0)
+            item.setdefault("counter", 0)
+            item.setdefault("volume", item.get("volume", self.player_volume))
+            item.setdefault("priority", item.get("priority", 0))
+            self.available_item[item["id"]] = item
+
+        # VLC controller (assume this object manages libvlc and has .start_vlc(), .play(url=), .set_volume(), .quit(), .running)
+        self.player = VLCController(cmd=self._get_vlc_cmd(), on_state_change=self.vlc_callback)
+
+        # Executor used only for play timeouts (single persistent thread, avoids thread-pool leaks)
+        self.play_executor = ThreadPoolExecutor(max_workers=1)
+
+        # Threads and control
+        self.stop_event = threading.Event()
+        self.threads = []  # list of threading.Thread running long-lived loops (multicast binds, monitor, redis parser, redis handler)
+
+        # Redis subscription handling: one parser thread + one worker thread with queue
+        self.redis_sub_volume = redisObj.subscribe_volume()
+        self.redis_queue = Queue(maxsize=REDIS_QUEUE_MAXSIZE)
+
+        # Start nothing in __init__. Call start() to run threads.
+        log.logger.info("MulticastPlayer initialized")
+
+    def _get_vlc_cmd(self):
+        # Keep same arguments as original
+        return [
+            "/usr/sbin/vlc",
+            "--control", "rc",
+            "--rc-fake-tty",
+            "--quiet",
+            "--aout=alsa",
+            "--gain-init=0.0",
+            "--network-caching=%d" % self.network_caching,
+            "--network-synchronisation",
+            "--clock-synchro=1",
+        ]
+
+    # ---- VLC state callback ----
+    def vlc_callback(self, state):
+        # state mapping retained from original
+        try:
+            if state == 3:
+                self.MediaPlayerPlaying_cb()
+            elif state in (-1, 0, 5):
+                self.MediaPlayerStopped_cb()
+        except Exception as e:
+            log.logger.exception("vlc_callback exception: %s", e)
+
+    # ---- Redis message handling ----
+    def _redis_parser_loop(self):
+        """Continuously parse redis sub responses and enqueue the parsed JSON to redis_queue."""
+        log.logger.info("Redis parser thread started")
+        while not self.stop_event.is_set():
+            try:
+                msg = self.redis_sub_volume.parse_response()  # blocking
+                # Expected original format: msg[2] contains JSON string
+                if not msg or len(msg) < 3:
+                    continue
+                raw = msg[2]
+                try:
+                    data = json.loads(raw)
+                except Exception:
+                    log.logger.error("redis msg not json: %s", raw)
+                    continue
+                # enqueue (drop if full to avoid memory growth)
+                try:
+                    self.redis_queue.put_nowait(data)
+                except Exception:
+                    # queue full; drop oldest to make room (prevent unbounded memory growth)
+                    try:
+                        _ = self.redis_queue.get_nowait()
+                        self.redis_queue.put_nowait(data)
+                    except Exception:
+                        log.logger.warning("redis queue full; dropped message")
+            except Exception as e:
+                # parse_response may raise on connection issues; if so, sleep briefly and retry
+                log.logger.exception("Exception in redis parser loop: %s", e)
+                time.sleep(0.5)
+        log.logger.info("Redis parser thread exiting")
+
+    def _redis_worker_loop(self):
+        """Consume parsed redis messages and handle volume updates in a single worker thread."""
+        log.logger.info("Redis worker thread started")
+        while not self.stop_event.is_set():
+            try:
+                data = self.redis_queue.get(timeout=0.5)
+            except Empty:
+                continue
+            try:
+                self.handleRedisVolumeData(data)
+            except Exception as e:
+                log.logger.exception("Exception handling redis volume data: %s", e)
+            finally:
+                # mark task done if using task tracking (Queue.task_done not required)
+                pass
+        log.logger.info("Redis worker thread exiting")
+
+    def handleRedisVolumeData(self, data):
+        """Process a single redis message dict to update multicast volumes."""
+        if not data:
+            return
+        # check multicast keys exist (original logic used "multicast_1_volume" as marker)
+        if "multicast_1_volume" in data:
+            # update available_item volumes in-place. Use lock only around mutations.
+            for i in range(1, ADDRESS_COUNT + 1):
+                key = f"multicast_{i}_volume"
+                if key in data:
+                    with self.status_lock:
+                        if i in self.available_item:
+                            self.available_item[i]["volume"] = data[key]
+            # if something is playing, apply new volume
+            if self.playing_id > 0:
+                with self.multicast_volume_lock:
+                    volume = self.available_item.get(self.playing_id, {}).get("volume", self.player_volume)
+                    try:
+                        self.player.set_volume(volume)
+                        log.logger.info("Event set multicast player volume:%d", volume)
+                    except Exception:
+                        log.logger.exception("Failed to set volume to %s", volume)
+        else:
+            # ignore other messages per original behavior
+            pass
+
+    # ---- MediaPlayer event handlers ----
+    def MediaPlayerPlaying_cb(self):
+        try:
+            # set initial volume
+            with self.multicast_volume_lock:
+                self.player.set_volume(self.player_volume)
+            log.logger.info("MediaPlayerPlaying_cb - set multicast volume:%d", self.player_volume)
+
+            # volume event
+            redisObj.volumeEventPush(json.dumps({"name": "MULTICAST", "action": "on"}))
+
+            # screen on
+            time.sleep(2)
+            name = f"Screen{self.playing_id}"
+            self.currentScreenName = name
+            redisObj.screen_publish(json.dumps({"action": "on", "name": name}))
+            log.logger.info("Trigger %s to screen", name)
+
+            # gpio
+            gpio = self.available_item.get(self.playing_id, {}).get('gpio')
+            if gpio and gpio != 'Disabled':
+                redisObj.lpush(json.dumps({"id": "gpio135", "type": gpio}))
+
+            # log push
+            item = self.available_item.get(self.playing_id, {})
+            redisObj.logpush("MULTICAST,START PLAY,Priority:%d <%s:%d>" % (self.playing_id, item.get("ip", ""), item.get("port", 0)))
+        except Exception:
+            log.logger.exception("MediaPlayerPlaying_cb exception")
+
+    def MediaPlayerStopped_cb(self):
+        try:
+            log.logger.info("MediaPlayerStopped_cb")
+            redisObj.volumeEventPush(json.dumps({"name": "MULTICAST", "action": "off"}))
+            redisObj.logpush("MULTICAST,STOP PLAY,")
+        except Exception:
+            log.logger.exception("MediaPlayerStopped_cb exception")
+
+    # ---- Play control (with safe timeout using persistent executor) ----
+    def _play_task(self, uri):
+        """Actual play invocation executed in play_executor worker."""
+        try:
+            if not self.player.running:
+                # start vlc and play
+                self.player.start_vlc()
+                self.player.play(url=uri)
+            else:
+                # toggle off gpio & screen per original logic then play new uri
+                try:
+                    redisObj.lpush(json.dumps({"id": "gpio135", "type": "Off"}))
+                    redisObj.screen_publish(json.dumps({"action": "off", "name": self.currentScreenName}))
+                    log.logger.info("Trigger off to screen")
+                except Exception:
+                    log.logger.exception("Error sending off gpio/screen")
+                self.player.play(url=uri)
+                with self.multicast_volume_lock:
+                    self.player.set_volume(self.player_volume)
+        except Exception:
+            log.logger.exception("_play_task exception")
+
+    def play(self):
+        """Public play method. Uses a persistent single-thread executor and a timeout to avoid blocking.
+           Returns True if play completed within timeout, False if timed out."""
+        if not self.current_uri:
+            log.logger.warning("play called but current_uri empty")
+            return False
+        try:
+            future = self.play_executor.submit(self._play_task, self.current_uri)
+            # wait for completion up to PLAY_TIMEOUT seconds
+            future.result(timeout=PLAY_TIMEOUT)
+            return True
+        except TimeoutError:
+            log.logger.warning("play timed out after %.2fs", PLAY_TIMEOUT)
+            # if timed out, try to quit player to recover (best-effort)
+            try:
+                self.player.quit()
+            except Exception:
+                log.logger.exception("Error quitting player after play timeout")
+            return False
+        except Exception:
+            log.logger.exception("Exception in play()")
+            return False
+
    # ---- Monitor thread ----
    def _monitor_channel_loop(self):
        """Background monitor loop.

        Every ``sleep_interval`` seconds it:
          1. refreshes per-source availability: sources that received a packet
             since the last tick get ``counter`` reset to TIME_COUNTER, the
             others are decremented and marked unavailable at/below zero;
          2. when the currently playing source just expired, switches to the
             highest-priority source still available, or stops playback when
             none is left.

        Runs until ``self.stop_event`` is set. Does nothing while
        ``self.priority == 100`` (a call event holds the player).
        """
        log.logger.info("Monitor thread started")
        last_priority = -1
        sleep_interval = 0.5
        while not self.stop_event.is_set():
            sleep(sleep_interval)
            # skip monitoring if priority == 100 (call event holds)
            if self.priority == 100:
                continue

            # log priority change at most when it actually changes
            if last_priority != self.priority:
                last_priority = self.priority
                log.logger.info("Monitor priority: %d", self.priority)

            # update counters and availability
            # minimize allocations by local referencing
            with self.status_lock:
                for item in self.available_item.values():
                    if item.get("update_status", 0) == 1:
                        # a packet arrived since the last tick: source is alive
                        item["update_status"] = 0
                        item["available_status"] = 1
                        item["counter"] = TIME_COUNTER
                    else:
                        # decrement but guard negative
                        c = item.get("counter", 0) - 1
                        item["counter"] = c
                        if c <= 0:
                            item["available_status"] = 0

            # if nothing playing, continue
            if self.playing_id == 0:
                continue

            # if current playing has expired, find next highest priority available.
            # The == 0 test fires exactly once per expiry: the counter keeps
            # decrementing below zero on later ticks.
            # NOTE(review): this read is outside status_lock — presumably benign
            # since only this thread rewrites counters; confirm.
            current = self.available_item.get(self.playing_id)
            if current and current.get("counter", 0) == 0:
                tmp_id = 0
                tmp_priority = -1
                # find best available
                for per_item in self.available_item.values():
                    if per_item.get("available_status", 0) == 1:
                        p = per_item.get("priority", 0)
                        if p > tmp_priority:
                            tmp_priority = p
                            tmp_id = per_item.get("id", 0)
                if tmp_id != 0:
                    chosen = self.available_item[tmp_id]
                    self.current_uri = chosen["url"]
                    with self.priority_lock:
                        self.priority = chosen.get("priority", 0)
                    # trigger change
                    self.multicast_callback(tmp_id, 1)
                else:
                    # stop playing
                    self.multicast_callback(0, 0)

        log.logger.info("Monitor thread exiting")
+
    # ---- Multicast bind thread (one per address) ----
    def _multicast_bind_loop(self, item):
        """Per-address listener thread.

        Binds a UDP socket to *item*'s multicast group/port, marks the source
        alive on every received packet, and raises playback priority when this
        source outranks the current one. On socket errors the socket is
        recreated after MULTICAST_RETRY_DELAY to avoid a tight restart loop.
        Runs until ``self.stop_event`` is set.

        Parameters:
            item: dict with at least 'id', 'ip', 'port', 'url', 'priority'.
        """
        thread_name = f"multicast-{item.get('id')}"
        log.logger.info("id:%d Thread start successfully...", item.get("id"))
        while not self.stop_event.is_set():
            s = None
            try:
                # create UDP socket
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                # On some systems binding to multicast group ip requires ''/INADDR_ANY
                bind_addr = (item["ip"], item["port"])
                try:
                    s.bind(bind_addr)
                except OSError:
                    # if cannot bind to group address, try binding to ('', port)
                    s.bind(('', item["port"]))
                # join multicast group
                # struct.pack('4s4s', inet_aton(group), inet_aton(iface))
                mreq = socket.inet_aton(item["ip"]) + socket.inet_aton(ANY)
                s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
                s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
                # NOTE(review): blocking mode means the socket.timeout branch
                # below can never fire, and stop_event is only re-checked after
                # the next packet arrives — confirm whether settimeout() is wanted.
                s.setblocking(True)

                # loop receiving
                while not self.stop_event.is_set():
                    try:
                        data, address = s.recvfrom(2048)
                        # mark update_status safely
                        with self.status_lock:
                            self.available_item[item["id"]]["update_status"] = 1
                        # if this address has higher priority, trigger play
                        if item.get("priority", 0) > self.priority:
                            with self.priority_lock:
                                self.priority = item.get("priority", 0)
                            self.current_uri = item.get("url", "")
                            self.multicast_callback(item["id"], 1)
                    except socket.timeout:
                        continue
                    except OSError as e:
                        # socket error -> break to recreate socket
                        log.logger.exception("Socket error on %s: %s", thread_name, e)
                        break
                    except Exception as e:
                        # unexpected exception, log and continue
                        log.logger.exception("Exception in multicast recv loop: %s", e)
                        break
            except Exception as e:
                log.logger.exception("Exception creating multicast socket for id %s: %s", item.get("id"), e)
            finally:
                # ensure socket closed
                try:
                    if s:
                        s.close()
                except Exception:
                    pass
            # if we reach here due to exception, sleep a bit before retrying to avoid tight restart loops
            if not self.stop_event.is_set():
                time.sleep(MULTICAST_RETRY_DELAY)
        log.logger.info("Multicast thread for id %s exiting", item.get("id"))
+
    # ---- Multicast callback (triggered by recv / monitor) ----
    def multicast_callback(self, id, action):
        """React to a source/priority event.

        Parameters:
            id: source id in ``self.available_item`` (unused for 2/3/stop).
                NOTE: the name shadows the builtin; kept for interface
                compatibility.
            action:
                1 - start playing source *id*
                2 - call event begins: pin priority to 100 (playback itself is
                    not stopped here; presumably the call path handles audio —
                    confirm)
                3 - call event ends: drop priority back to 0 after a short wait
                other - stop playback entirely and reset state
        """
        try:
            if action == 1:
                log.logger.info("Change url:%s", self.available_item[id]["url"])
                log.logger.info("Start play address id:%d", id)
                # set volume and playing id, then call play()
                self.player_volume = self.available_item[id].get("volume", self.player_volume)
                self.playing_id = id
                # update current uri
                self.current_uri = self.available_item[id].get("url", self.current_uri)
                # call play (non-blocking/with timeout)
                self.play()
            elif action == 2:
                # set priority high to prevent switches
                with self.priority_lock:
                    self.priority = 100
                log.logger.info("Stop play address id:%d for Call Event", self.playing_id)
            elif action == 3:
                # resume: reset priority to 0 (after short wait)
                if self.priority != 0:
                    sleep(1)
                    with self.priority_lock:
                        self.priority = 0
                log.logger.info("resume play address id:%d for Call Event", self.playing_id)
            else:
                # full stop
                try:
                    self.player.quit()
                except Exception:
                    log.logger.exception("Error quitting player on stop")
                try:
                    redisObj.lpush(json.dumps({"id": "gpio135", "type": "Off"}))
                    redisObj.screen_publish(json.dumps({"action": "off", "name": self.currentScreenName}))
                    log.logger.info("Trigger off to screen")
                except Exception:
                    log.logger.exception("Error publishing off gpio/screen")
                log.logger.info("Stop play address id:%d", self.playing_id)
                self.playing_id = 0
                with self.priority_lock:
                    self.priority = 0
        except Exception:
            log.logger.exception("Exception in multicast_callback")
+
+    # ---- Lifecycle management ----
+    def start(self):
+        """Start all background threads: multicast listeners, monitor, redis parser+worker."""
+        log.logger.info("Start MulticastPlayer threads")
+        # multicast threads (one per address)
+        for item in self.address:
+            t = threading.Thread(target=self._multicast_bind_loop, args=(item,), daemon=True)
+            t.start()
+            self.threads.append(t)
+
+        # monitor thread
+        mon_t = threading.Thread(target=self._monitor_channel_loop, daemon=True)
+        mon_t.start()
+        self.threads.append(mon_t)
+
+        # redis parser and worker
+        parser_t = threading.Thread(target=self._redis_parser_loop, daemon=True)
+        worker_t = threading.Thread(target=self._redis_worker_loop, daemon=True)
+        parser_t.start(); worker_t.start()
+        self.threads.extend([parser_t, worker_t])
+
+        log.logger.info("MulticastPlayer started with %d threads", len(self.threads))
+
+    def stop(self, wait=True, timeout=5.0):
+        """Signal threads to stop, shutdown executors and player. Optionally wait for threads to finish."""
+        log.logger.info("Stopping MulticastPlayer...")
+        self.stop_event.set()
+
+        # close redis subscription if API allows (best-effort)
+        try:
+            if hasattr(self.redis_sub_volume, "close"):
+                try:
+                    self.redis_sub_volume.close()
+                except Exception:
+                    pass
+        except Exception:
+            pass
+
+        # attempt to quit player
+        try:
+            self.player.quit()
+        except Exception:
+            log.logger.exception("Error quitting player on stop")
+
+        # shutdown play executor
+        try:
+            self.play_executor.shutdown(wait=False)
+        except Exception:
+            log.logger.exception("Error shutting down play_executor")
+
+        # wait for threads to finish (best-effort)
+        if wait:
+            end = time.time() + timeout
+            for t in self.threads:
+                remaining = max(0.0, end - time.time())
+                if remaining <= 0:
+                    break
+                try:
+                    t.join(timeout=remaining)
+                except Exception:
+                    pass
+        log.logger.info("MulticastPlayer stopped")
+
+    # ---- signal handler wrapper for integration with external signal handling ----
+    def singal_handler(self, signum=None, frame=None):
+        # original behavior called sys.exit(0) after cleanup. We will attempt graceful stop then exit.
+        log.logger.info("Signal received, shutting down...")
+        try:
+            self.stop(wait=True, timeout=5.0)
+        finally:
+            # explicit exit as original did
+            try:
+                sys.exit(0)
+            except SystemExit:
+                raise
+            except Exception:
+                pass
+
    # destructor fallback
    def __del__(self):
        # Best-effort cleanup if the object is garbage-collected without an
        # explicit stop(); never let an exception escape a destructor.
        try:
            self.stop(wait=False)
        except Exception:
            pass

+ 219 - 0
app/ispeaker-service/app/player.py

@@ -0,0 +1,219 @@
+import collections
+import subprocess
+import threading
+import time
+import re
+from threading import Lock
+from app import log
+
# argv used to spawn VLC with its rc (remote-control) console on stdin/stdout
cmd = [
    "/usr/sbin/vlc",
    "--control", "rc",
    "--rc-fake-tty",
    "--quiet",
    "--aout=alsa",
    "--gain-init=0.0",
]

class VLCController:
    """Control a VLC subprocess through its rc (remote-control) interface.

    Commands are written to the child's stdin; stdout is parsed on a
    background thread for ``status change:`` lines, which drive the optional
    ``on_state_change`` callback and the auto-reconnect logic. Volumes are
    exposed as percentages (0-100) and mapped to VLC's 0-256 scale.
    """

    def __init__(self, cmd=cmd, volume=0, on_state_change=None):
        """Create a controller; VLC is not started until start_vlc().

        Parameters:
            cmd: argv list used to spawn VLC.
            volume: initial volume in percent.
            on_state_change: optional callable(state: int) invoked on every
                play-state transition (and with 5 from quit()).
        """
        self.process = None              # subprocess.Popen once started
        self.volume = volume             # last requested volume (percent)
        self.on_state_change = on_state_change  # state-change callback
        self.running = False             # stdout reader thread alive
        self.playing = False             # last observed state was "playing" (3)
        self.current_state = -1          # last play-state value, -1 = unknown
        self.url = ""                    # last played url
        self.reconnect = False           # reconnect loop currently active
        self.output_history = collections.deque(maxlen=50)  # recent stdout lines
        self.MAX_VLC_VOLUME = 256.0      # VLC rc volume full scale
        self.cmd = cmd
        self.cmd_lock = Lock()           # serializes writes to VLC stdin
        self.threadSendNull = None       # keep-alive thread handle

    def _send_cmd_null(self):
        """Keep-alive loop: poke VLC's rc console with an empty line every
        0.5s; gives up after 10 failed sends or once the process is gone."""
        failures = 0
        while self.process:
            if self.send_cmd("\r"):
                failures += 1
                if failures >= 10:
                    break
            time.sleep(0.5)
        log.logger.info("[VLC] send_cmd_null thread exited")

    def start_vlc(self):
        """Spawn the VLC subprocess plus its keep-alive and stdout-reader
        threads. No-op when a process is already running."""
        if self.process is not None:
            return

        self.process = subprocess.Popen(
            self.cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            bufsize=1,  # line-buffered text pipes
        )

        self.running = True

        self.threadSendNull = threading.Thread(target=self._send_cmd_null, daemon=True)
        self.threadSendNull.start()

        self.threadRead = threading.Thread(target=self._read_stdout, daemon=True)
        self.threadRead.start()
        log.logger.info("[VLC] started")

    def _read_stdout(self):
        """Reader thread: record VLC output and fire state-change callbacks."""
        for line in self.process.stdout:
            line = line.strip()
            if not line:
                continue
            self.output_history.append(line)

            if "status change:" in line:
                m = re.search(r'status change: \( play state: (\w+)', line)
                if m:
                    new_state = int(m.group(1))
                    self.current_state = new_state
                    if self.on_state_change:
                        self.selfCallback(new_state)
                        self.on_state_change(new_state)

        self.running = False
        log.logger.info("[VLC] stdout thread exited")

    def send_cmd(self, cmd):
        """Send one rc command line to VLC.

        Returns 0 on success, 1 on failure (no process or write error).
        """
        if not self.process or not self.process.stdin:
            return 1
        try:
            # fix: hold the lock via a context manager so it can never be
            # leaked by an exception between acquire() and release()
            with self.cmd_lock:
                self.process.stdin.write(cmd + "\n")
                self.process.stdin.flush()
        except Exception:
            log.logger.info("[VLC] send_cmd error")
            return 1
        return 0

    def play(self, url="", volume=0):
        """Play *url*, or replay the last url when *url* is empty.

        NOTE(review): the *volume* parameter is accepted for interface
        compatibility but is unused, exactly as in the original code.
        """
        if url == "":
            url = self.url
        else:
            self.url = url
        self.send_cmd("stop")
        self.send_cmd("clear")
        self.send_cmd(f"add {url}")
        self.send_cmd("play")
        log.logger.info(f"[VLC] playing {url}")

    def stop(self):
        """Stop playback and reset the cached state."""
        self.send_cmd("stop")
        self.current_state = -1

    def quit(self):
        """Shut VLC down and reset all state. Safe when not started."""
        if not self.process:
            return
        self.send_cmd("quit")
        try:
            if self.process.stdin:
                self.process.stdin.close()
            # fix: a hung VLC used to raise TimeoutExpired out of quit();
            # escalate to kill() so quit() always completes
            self.process.wait(timeout=3)
        except subprocess.TimeoutExpired:
            self.process.kill()
            self.process.wait()
        except Exception:
            log.logger.info("[VLC] error while closing process")
        self.process = None
        self.running = False
        self.playing = False
        # fix: threadSendNull is None when start_vlc() was never called
        if self.threadSendNull is not None:
            self.threadSendNull.join(timeout=2)
            self.threadSendNull = None
        self.current_state = -1
        self.url = ""
        self.reconnect = False
        # fix: the callback defaults to None — calling it unconditionally
        # raised TypeError
        if self.on_state_change:
            self.on_state_change(5)
        log.logger.info("[VLC] stopped")

    def reconnect_player(self):
        """Retry 'play' every 3s until the reconnect flag is cleared."""
        self.reconnect = True
        while self.reconnect:
            log.logger.info("[VLC] not playing, reconnecting...")
            self.send_cmd("play")
            time.sleep(3)

    def selfCallback(self, new_state):
        """Internal state handler: start reconnecting when playback drops."""
        log.logger.info(f"[VLC] state changed to {new_state}")
        if new_state != 3 and new_state != 2 and not self.reconnect:  # not playing/opening
            self.playing = False
            self.threadCheck = threading.Thread(target=self.reconnect_player,)
            self.threadCheck.start()
        else:
            if new_state == 3:
                self.playing = True
                self.reconnect = False

    def get_volume(self, timeout=1):
        """Query the current volume and return it as a percentage.

        Returns 0 when the status command could not be sent and None when no
        volume line appears within *timeout* seconds (the 0/None asymmetry is
        kept for backward compatibility).
        """
        if self.send_cmd("status"):
            return 0
        time.sleep(0.1)
        start = time.time()
        while time.time() - start < timeout:
            # scan the most recent output first
            for l in reversed(self.output_history):
                if "audio volume:" in l:
                    m = re.search(r"audio volume:\s*(\d+)", l)
                    if m:
                        raw = int(m.group(1))
                        return int(round(raw / self.MAX_VLC_VOLUME * 100.0))
            time.sleep(0.1)
        return None

    def set_volume(self, volume):
        """Set the volume as a percentage, clamped to 0-100.

        The value is always remembered; it is only pushed to VLC while
        playback is active.
        """
        volume = max(0, min(100, volume))
        self.volume = volume
        if not self.playing:
            return
        value = round(volume * self.MAX_VLC_VOLUME / 100.0)
        if self.process:
            self.send_cmd(f"volume {value}")
            log.logger.info(f"[VLC] set volume to {volume}")

    def get_state(self, timeout=1):
        """Return the last play-state seen on stdout (-1 when unknown).

        *timeout* is retained for interface compatibility with the old
        polling implementation; it is not used.
        """
        return self.current_state
+    
def state_callback(new_state):
    """Default demo callback: log every VLC state transition."""
    message = f"[Callback] VLC state changed: {new_state}"
    log.logger.info(message)
+
if __name__ == "__main__":
    # Manual smoke test: start VLC, play a test stream and exercise the
    # volume API. Requires a local VLC binary and a reachable stream URL.
    vlc = VLCController(on_state_change=state_callback)
    vlc.start_vlc()
    vlc.play("http://192.168.11.109:8001/sourceId-13")
    vlc.set_volume(49)
    print(f"Current volume: {vlc.get_volume()}")
    time.sleep(3)
    vlc.set_volume(0)
    print(f"Current volume: {vlc.get_volume()}")
    time.sleep(3)
    vlc.set_volume(100)
    print(f"Current volume: {vlc.get_volume()}")

    # block forever so the daemon reader threads keep running
    while True:
        time.sleep(1)

+ 53 - 0
app/ispeaker-service/app/redisSub.py

@@ -0,0 +1,53 @@
+import redis
+from app.app_config import *
+ 
+ 
class RedisHelper:
    """Small facade over a local StrictRedis connection.

    Knows the project's pub/sub channel names and exposes one helper per
    message flow (publish, list push, channel-specific pushes). Every
    push/publish helper returns True.
    """

    def __init__(self):
        # single shared connection to the local redis instance
        self.__conn = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
        self.chan_sub = 'session-channel'
        self.chan_pub = 'output-channel'
        self.chan_log = 'writelog-channel'
        self.chan_api = 'api-channel'
        self.chan_screen_pub = 'screen-channel'

    def public(self, msg):
        """Publish *msg* on the output channel."""
        self.__conn.publish(self.chan_pub, msg)
        return True

    def subscribe_session(self):
        """Subscribe to the session channel and consume the subscribe ack."""
        channel = self.__conn.pubsub()
        channel.subscribe(self.chan_sub)
        channel.parse_response()
        return channel

    def subscribe_volume(self):
        """Subscribe to the volume channel and consume the subscribe ack."""
        channel = self.__conn.pubsub()
        channel.subscribe(VALUECHPUB)
        channel.parse_response()
        return channel

    def lpush(self, data):
        """Queue *data* on the output list."""
        self.__conn.lpush(self.chan_pub, data)
        return True

    def logpush(self, data):
        """Queue *data* on the write-log list."""
        self.__conn.lpush(self.chan_log, data)
        return True

    def volumeEventPush(self, data):
        """Publish a volume event on the event channel."""
        self.__conn.publish(EVENTCHSUB, data)
        return True

    def volumeItemPush(self, data):
        """Publish a volume item on the web channel."""
        self.__conn.publish(WEBCHPUB, data)
        return True

    def apipush(self, data):
        """Queue *data* on the api list."""
        self.__conn.lpush(self.chan_api, data)
        return True

    def screen_publish(self, msg):
        """Publish *msg* on the screen channel."""
        self.__conn.publish(self.chan_screen_pub, msg)
        return True

+ 69 - 0
app/ispeaker-service/app/register.py

@@ -0,0 +1,69 @@
+from app import log, redisObj
+from app.app_config import *
+import json
+import threading
+import traceback
+import sys
+
"""
    desc:
        Thread wrapper class, making it easier to capture exceptions raised
        in child threads.
"""
class runFunctionThread(threading.Thread):
    """Run ``funcName(*args)`` on a thread and record any exception.

    After join(): ``exitcode`` is 0 on success or 1 on failure,
    ``exception`` holds the raised instance (or None) and
    ``exc_traceback`` the formatted traceback ('' when no error).
    """

    def __init__(self, funcName, *args):
        # fix: prefer super() over the legacy explicit base-class call
        super().__init__()
        self.args = args
        self.funcName = funcName
        self.exitcode = 0
        self.exception = None
        self.exc_traceback = ''

    def run(self):
        """Thread body: invoke the target, capturing failures for the parent."""
        try:
            self._run()
        except Exception as e:
            self.exitcode = 1  # 1 on abnormal exit, 0 when clean
            self.exception = e
            self.exc_traceback = ''.join(traceback.format_exception(*sys.exc_info()))

    def _run(self):
        # fix: the old try/except that immediately re-raised was a no-op;
        # kept as a separate hook for subclasses
        self.funcName(*self.args)
+
"""
    desc:
        Dispatch table matching incoming mqtt commands to control functions.
"""
class Register(object):
    def __init__(self):
        # action name -> handler function
        self.func_dict = {}

    def action(self, action):
        """Decorator factory: register the wrapped function under *action*.

        Uses setdefault, so the first registration for a name wins.
        """
        def wrapper(func):
            self.func_dict.setdefault(action, func)
            return func

        return wrapper

    def recieve_run(self, client, userdata, message):
        """paho-mqtt on_message callback: log the command, parse it as JSON
        and run the matching handler on a capture thread."""
        payload = str(message.payload.decode("utf-8"))
        redisObj.logpush("MQTT,SERVER COMMAND,%s" % payload.replace(",", ";").replace('"', ''))
        try:
            data = json.loads(payload)
            log.logger.info(data)
        except ValueError:
            # fix: previously fell through with 'data' unbound -> NameError
            log.logger.error("Recieve data is not json format:%s" % payload)
            return
        if data.get(ACTION_NAME) in self.func_dict:
            # fix: create the thread before the try so the except handler can
            # never reference an unbound 'td'
            td = runFunctionThread(self.func_dict.get(data.get(ACTION_NAME)), data)
            try:
                td.start()
                td.join()
                if td.exitcode != 0:
                    # fix: Thread.getName() is deprecated since 3.10; .name is identical
                    log.logger.error('Exception in ' + td.name + ' (catch by main)')
                    log.logger.error(td.exc_traceback)
                    sys.exit(0)
            except Exception:
                log.logger.error(td.exc_traceback)
        else:
            log.logger.error("Not support this action: %s" % (data.get(ACTION_NAME)))

+ 312 - 0
app/ispeaker-service/app/views.py

@@ -0,0 +1,312 @@
+from datetime import datetime
+import re
+from app import log
+from app import register
+from app import device
+from app.app_config import *
+
@register.action(PLAY)
def action_play(data):
    """Handle a play command: delegate the payload to device.play.

    Parameters:
        data: command dict; must contain a 'data' payload with the url.

    Returns:
        True when playback was started, False otherwise.
    """
    if "data" not in data:
        log.logger.info("Data not found")
        return False
    if not device.play(data.get(DATA)):
        log.logger.error("Play uri:%s on id:%s error" % (data.get(DATA).get("url"), data.get(ID)))
        return False
    log.logger.info("Play uri:%s on id:%s successfully" % (data.get(DATA).get("url"), data.get(ID)))
    return True
+
@register.action(PAUSE)
def action_pause(data):
    """Handle a pause command; expects data['data']['value'].

    Returns:
        True on success, False when the value is missing or the device
        call fails.
    """
    payload = data.get(DATA)
    if "value" not in payload:
        log.logger.info("Value not found")
        return False
    value = payload.get("value")
    if not device.pause(value):
        log.logger.error("Player pause on id:%s error" % (data.get(ID)))
        return False
    log.logger.info("Player pause on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action(RESUME)
def action_resume(data):
    """Handle a resume command; expects data['data']['value'].

    Returns:
        True on success, False when the value is missing or the device
        call fails.
    """
    payload = data.get(DATA)
    if "value" not in payload:
        log.logger.info("Value not found")
        return False
    value = payload.get("value")
    if not device.resume(value):
        log.logger.error("Player resume on id:%s error" % (data.get(ID)))
        return False
    log.logger.info("Player resume on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action(STOP)
def action_stop(data):
    """Handle a stop command.

    Always returns True; stop is treated as best-effort.
    """
    if not device.stop():
        # fix: log message used to read "Player stop on id:%s stop"
        log.logger.error("Player stop on id:%s error" % (data.get(ID)))

    log.logger.info("Player stop on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action(SET_EXTEN)
def action_set_exten(data):
    """Configure the SIP extension (host / exten / password) on the device.

    Returns:
        True on success, False when the payload is missing or incomplete.
    """
    if "data" not in data:
        log.logger.info("data not found")
        return False
    payload = data.get(DATA)
    # fix: these locals were left unbound (NameError) when a key was missing
    host = payload.get(HOST)
    exten = payload.get(EXTEN)
    password = payload.get(PASSWORD)
    if not (host and exten and password):
        log.logger.warning("data not compelete")
        return False
    if not device.set_exten(host, exten, password):
        log.logger.error("Set exten:%s on id:%s error" % (exten, data.get(ID)))

    log.logger.info("Set exten:%s on id:%s successfully" % (exten, data.get(ID)))
    return True
+
@register.action(SET_SOLF_VOLUME)
def action_set_soft_volume(data):
    """Set the player (software) volume from data['data']['volume'].

    NOTE(review): the membership test uses the literal "volume" while the
    read uses the VOLUME constant — presumably identical; confirm in
    app_config.
    """
    payload = data.get(DATA)
    if "volume" not in payload:
        log.logger.warning("volume not found!")
        return False
    volume = int(payload.get(VOLUME))
    if not device.set_soft_volume(volume):
        log.logger.error("Set volume:%d on id:%s error" % (volume, data.get(ID)))

    log.logger.info("Set soft volume:%d on id:%s successfully" % (volume, data.get(ID)))
    return True
+
@register.action(SET_HARD_VOLUME)
def action_set_hard_volume(data):
    """Set the hardware volume from data['data']['volume'].

    NOTE(review): the membership test uses the literal "volume" while the
    read uses the VOLUME constant — presumably identical; confirm in
    app_config.
    """
    payload = data.get(DATA)
    if "volume" not in payload:
        log.logger.warning("volume not found!")
        return False
    volume = int(payload.get(VOLUME))
    if not device.set_hard_volume(volume):
        log.logger.error("Set hard volume:%d on id:%s error" % (volume, data.get(ID)))

    log.logger.info("Set hard volume:%d on id:%s successfully" % (volume, data.get(ID)))
    return True
+
@register.action(SET_HARD_VOLUME_CONTROL)
def action_set_hard_volume_control(data):
    """Enable/disable forced hardware volume control from data['data']['value'].

    NOTE(review): the membership test uses the literal "value" while the
    read uses the VALUE constant — presumably identical; confirm in
    app_config.
    """
    payload = data.get(DATA)
    if "value" not in payload:
        log.logger.warning("value not found!")
        return False
    value = payload.get(VALUE)
    if not device.set_hard_volume_control(value):
        log.logger.error("Set hard volume control:%s on id:%s error" % (value, data.get(ID)))

    log.logger.info("Set hard volume control:%s on id:%s successfully" % (value, data.get(ID)))
    return True
+
@register.action(SET_SERVICE_SETTINGS)
def action_set_service_settings(data):
    """Apply a generic settings payload to the device service.

    Returns:
        True on success, False when the payload is missing or the device
        call fails.
    """
    if "data" not in data:
        # fix: data_obj was left unbound (NameError) when the key was missing
        log.logger.info("data not found")
        return False
    data_obj = data.get(DATA)
    if not device.set_service_settings(data_obj):
        log.logger.error("Set service settings:%s on id:%s error" % (data_obj, data.get(ID)))
        return False

    log.logger.info("Set service settings:%s on id:%s successfully" % (data_obj, data.get(ID)))
    return True
+
@register.action("start-pull-rtsp")
def action_start_pull_rtsp(data):
    """Start the rtsp pull/push relay from *src* to *dst*.

    Returns:
        False when src/dst are missing; True otherwise (the relay start
        itself is best-effort and only logged).
    """
    payload = data.get(DATA)
    if "src" not in payload:
        log.logger.warning("src not found!")
        return False
    if "dst" not in payload:
        log.logger.warning("dst not found!")
        return False
    src = payload.get("src")
    dst = payload.get("dst")
    if not device.start_pull_rtsp(src, dst):
        log.logger.error("Start pull rtsp src:%s on id:%s error" % (src, data.get(ID)))
    else:
        log.logger.info("Start pull rtsp src:%s on id:%s successfully" % (src, data.get(ID)))
    return True
+
@register.action("stop-pull-rtsp")
def action_stop_pull_rtsp(data):
    """Stop the rtsp pull/push relay (best-effort; always returns True)."""
    if device.stop_pull_rtsp():
        log.logger.info("Stop pull rtsp on id:%s successfully" % (data.get(ID)))
    else:
        log.logger.error("Stop pull rtsp on id:%s error" % (data.get(ID)))
    return True
+
@register.action("reboot")
def action_reboot(data):
    """Reboot the device identified by data['id']."""
    target = data.get("id")
    device.reboot(target)
    log.logger.info("Reboot on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action("reset")
def action_reset(data):
    """Factory-reset the device identified by data['id']."""
    target = data.get("id")
    device.reset(target)
    log.logger.info("Reset on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action("upgrade")
def action_upgrade(data):
    """Kick off a firmware upgrade; requires a 'data' payload."""
    if "data" not in data:
        log.logger.info("Upgrade on id:%s miss argments" % (data.get(ID)))
        return False
    device.upgrade(data.get("id"), data.get(DATA))
    log.logger.info("Upgrade on id:%s executing..." % (data.get(ID)))
    return True
+
@register.action("get-upgrade-status")
def action_get_upgrade_status(data):
    """Report the current upgrade status for the device."""
    target = data.get("id")
    device.getUpgradeStatus(target)
    log.logger.info("Get upgrade status on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action("import-config")
def action_import_config(data):
    """Import a configuration payload; requires a 'data' key."""
    if "data" not in data:
        log.logger.info("Get import config on id:%s miss argments" % (data.get(ID)))
        return False
    device.importConfig(data.get("id"), data.get(DATA))
    log.logger.info("Get import config on id:%s successfully" % (data.get(ID)))
    return True
+
@register.action("relay-control")
def action_relay_control(data):
    """Drive the relay output; requires a 'data' payload."""
    if "data" not in data:
        log.logger.info("Get relay control on id:%s miss argments" % (data.get(ID)))
        return False
    device.relayControl(data.get("id"), data.get(DATA))
    log.logger.info("Get relay control on id:%s successfully" % (data.get(ID)))
    return True

+ 27 - 0
app/ispeaker-service/bin/ispeakerctl

@@ -0,0 +1,27 @@
+#!/usr/bin/python3
+import os,sys
+
# control FIFO consumed by the ispeaker service
write_path = "/userdata/ispeaker.fifo"

if __name__ == "__main__":
    # map each accepted action to the single byte written into the fifo
    actions = {"stop_music": "0", "resume_music": "1"}

    if len(sys.argv) != 2 or sys.argv[1] not in actions:
        print("Usage:./ispeakerctl <action>")
        print("\tresume_music\tresume play music.")
        print("\tstop_music\tstop play music.")
        exit(0)

    # open (creating if needed) the control fifo and write the action byte
    wf = os.open(write_path, os.O_SYNC | os.O_CREAT | os.O_RDWR)
    os.write(wf, actions[sys.argv[1]].encode())
    os.close(wf)
    exit(0)

binární
app/ispeaker-service/bin/rtsp_pull_push


+ 32 - 0
app/ispeaker-service/ispeaker

@@ -0,0 +1,32 @@
+#!/usr/bin/python3
+from app import log
+from app import  multicast_enanle
+from time import sleep
+import signal
+
+if __name__ == "__main__":
+    """
+    程序启动入口
+ 
+    Parameters:
+ 
+    Returns:
+    """
+
+    """
+        desc:
+            启动开关读取
+    """
+
+    if multicast_enanle == 'yes':
+        from app import multicast_palyer
+
+        signal.signal(signal.SIGINT, multicast_palyer.singal_handler)
+
+        multicast_palyer.start()
+
+    log.logger.info("App Starting...")
+
+    while True:
+        sleep(60)
+        pass

+ 3 - 0
app/ispeaker-service/requirements.txt

@@ -0,0 +1,3 @@
+configparser==3.7.4
+paho-mqtt==1.4.0
+python-vlc==3.0.6109

binární
bin/rk_parser


+ 11 - 0
etc/rc.local

@@ -19,6 +19,17 @@ chmod 660 /dev/video-*
 #    ln -Tsf lib /usr/lib64
 #fi
 # bash /etc/init.d/rkwifibt.sh &
+
+#audio dir
+if [ ! -d /oem/audio ];then
+    mkdir -p /oem/audio
+    ln -snf /usr/share/baresip/bells_music_tone.wav /oem/audio/1.wav
+    ln -snf /usr/share/baresip/bells_music_tone.wav /oem/audio/2.wav
+    ln -snf /usr/share/baresip/bells_music_tone.wav /oem/audio/3.wav
+    ln -snf /usr/share/baresip/bells_music_tone.wav /oem/audio/4.wav
+    ln -snf /usr/share/baresip/bells_music_tone.wav /oem/audio/5.wav
+fi
+
 sleep 5
 
 /etc/scripts/play_ip

+ 1 - 12
etc/scripts/getmodel.sh

@@ -1,16 +1,5 @@
 #!/bin/bash
 
-SN="`/usr/bin/vendor_storage -r VENDOR_SN_ID | awk '{print $2}'`"
-SN_MODEL="`echo ${SN:0:4}`"
-# erro,using default model
-[ -z "${SN_MODEL}" ] && exit 0
-
-# SQ10-B
-if [ "${SN_MODEL}" = "ZSQB" ];then
-    echo -n "SQ10-B_V2"
-# SQ10-T
-elif [ "${SN_MODEL}" = "ZSQT" ];then
-    echo -n "SQ10-T_V2"
-fi
+echo -n "G2-DIA"
 
 exit 0

+ 68 - 0
etc/scripts/ispeaker

@@ -0,0 +1,68 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:          myservice
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Put a short description of the service here
+# Description:       Put a long description of the service here
+### END INIT INFO
+
+# Change the next 3 lines to suit where you install your script and what you want to call it
+DIR=/app/ispeaker-service
+DAEMON=$DIR/ispeaker
+DAEMON_NAME=ispeaker-service
+
+# Add any command line options for your daemon here
+DAEMON_OPTS=""
+
+# This next line determines what user the script runs as.
+# Root generally not recommended but necessary if you are using the Raspberry Pi GPIO from Python.
+DAEMON_USER=root
+
+# The process ID of the script when it runs is stored here:
+PIDFILE=/var/run/$DAEMON_NAME.pid
+
+#. /lib/lsb/init-functions
+
+do_start () {
+    #log_daemon_msg "Starting system $DAEMON_NAME daemon"
+    export  PYTHON_VLC_LIB_PATH="/usr/lib/libvlc.so.5"
+    echo "Starting system $DAEMON_NAME daemon"
+    start-stop-daemon --start --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
+    #log_end_msg $?
+    echo $?
+}
+do_stop () {
+    #log_daemon_msg "Stopping system $DAEMON_NAME daemon"
+    echo "Stoping system $DAEMON_NAME daemon"
+    start-stop-daemon --stop --pidfile $PIDFILE --retry 10
+    /bin/rm -rf /userdata/ispeaker.fifo
+    #log_end_msg $?
+    echo $?
+}
+
+case "$1" in
+
+    start|stop)
+        do_${1}
+        ;;
+
+    restart|reload|force-reload)
+        do_stop
+        do_start
+        ;;
+
+    status)
+        status_of_proc "$DAEMON_NAME" "$DAEMON" && exit 0 || exit $?
+        ;;
+
+    *)
+        echo "Usage: /etc/init.d/$DAEMON_NAME {start|stop|restart|status}"
+        exit 1
+        ;;
+
+esac
+exit 0

+ 1 - 0
etc/scripts/upgrade.sh

@@ -69,6 +69,7 @@ END
                 exit 1
             fi
         fi
+        /usr/sbin/reboot
 esac
 
 sync

+ 10 - 0
etc/scripts/watch_process.sh

@@ -43,6 +43,16 @@ start()
                         wd_c=`expr $wd_c + 1`
                 fi
 
+                ISPEAKER_STAT="`ps -e|grep -w "ispeaker" |grep -v grep`"
+                if [ -z "${ISPEAKER_STAT}" ];then
+                        MULTICAST_ENABLE="`sysconf /etc/speaker.conf get multicast_player enable`"
+                        if [ "foo${MULTICAST_ENABLE}" = "fooyes" ];then
+                                killall ispeaker > /dev/null 2>&1
+                                /etc/scripts/ispeaker start
+                                wd_c=`expr $wd_c + 1`
+                        fi
+                fi
+
                 ONVIFSERVER_STAT="`ps|grep -w onvifserver|grep -v grep`"
                 RTSPSERVER_STAT="`ps|grep -w rtspserver|grep -v grep`"
                 if [ -z "${ONVIFSERVER_STAT}" -o -z "${RTSPSERVER_STAT}" ];then

+ 7 - 20
oem/etc/speaker.conf

@@ -129,26 +129,13 @@ ulaw=yes
 opus=no
 
 [intercom]
-onekey_1_num=
-onekey_1_line=auto
-onekey_2_num=
-onekey_2_line=auto
-repress_1_cancel=no
-repress_2_cancel=no
-key_1_action=call
-key_2_action=call
-http_1_url=http://api.com/test1
-http_2_url=http://api.com/test2
-audio_1_file=alarm_tone1.wav
-audio_2_file=alarm_tone2.wav
-audio_1_repeat=3
-audio_2_repeat=3
-trigger_1_screen=no
-trigger_1_id=
-screen_1_duration=5
-trigger_2_screen=no
-trigger_2_id=
-screen_2_duration=5
+key_action=disabled
+onekey_num=
+onekey_line=auto
+repress_cancel=no
+http_url=http://api.com/test1
+audio_file=alarm_tone1.wav
+audio_repeat=3
 
 [relay_ctrl]
 dtmf=no

+ 5 - 3
readyForTarget.sh

@@ -9,7 +9,7 @@ if [ $# = 1 ];then
 
     #modify build date
     build_date="`date \"+%Y-%m-%d\"`"
-    cd oem/etc && awk -v new_date="$build_date" '/^\[upgrade\]/{f=1} f==1 && /^date=/{sub(/=.*/, "=" new_date); f=0} 1' speaker.conf > temp.conf && mv temp.conf speaker.conf
+    cd oem/etc && awk -v new_date="$build_date" '/^\[upgrade\]/{f=1} f==1 && /^date=/{sub(/=.*/, "=" new_date); f=0} 1' speaker.conf > temp.conf && mv speaker.conf ../../speaker.conf.bak && mv temp.conf speaker.conf
 
     cd oem && tar cpf ../oem_backup.tar .
 
@@ -18,9 +18,11 @@ if [ $# = 1 ];then
 
     name="u_m2_g2_fs_v_${SOFT_VERSION}_d_${date}.img"
 
-    tar -zcvf ${name} oem_backup.tar etc lib  oem usr www
+    tar -zcvf ${name} oem_backup.tar etc lib  oem usr www app
+
+    mv ../../speaker.conf.bak oem/etc/speaker.conf
     
-    rk_generate ${name} IPS-M2 ${SOFT_VERSION}
+    rk_generate ${name} G2-DIA ${SOFT_VERSION}
 else
     echo "usage:$0 branch-name"
 fi

+ 1 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 13 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/LICENSE

@@ -0,0 +1,13 @@
+Copyright 2016-2020 aio-libs collaboration.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 163 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/METADATA

@@ -0,0 +1,163 @@
+Metadata-Version: 2.1
+Name: async-timeout
+Version: 5.0.1
+Summary: Timeout context manager for asyncio programs
+Home-page: https://github.com/aio-libs/async-timeout
+Author: Andrew Svetlov <andrew.svetlov@gmail.com>
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: GitHub Actions, https://github.com/aio-libs/async-timeout/actions
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/async-timeout
+Project-URL: GitHub: issues, https://github.com/aio-libs/async-timeout/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/async-timeout
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Framework :: AsyncIO
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+
+async-timeout
+=============
+.. image:: https://travis-ci.com/aio-libs/async-timeout.svg?branch=master
+    :target: https://travis-ci.com/aio-libs/async-timeout
+.. image:: https://codecov.io/gh/aio-libs/async-timeout/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/aio-libs/async-timeout
+.. image:: https://img.shields.io/pypi/v/async-timeout.svg
+    :target: https://pypi.python.org/pypi/async-timeout
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+    :target: https://gitter.im/aio-libs/Lobby
+    :alt: Chat on Gitter
+
+asyncio-compatible timeout context manager.
+
+
+
+DEPRECATED
+----------
+
+This library has effectively been upstreamed into Python 3.11+.
+
+Therefore this library is considered deprecated and no longer actively supported.
+
+Version 5.0+ provides dual-mode when executed on Python 3.11+:
+``asyncio_timeout.Timeout`` is fully compatible with ``asyncio.Timeout`` *and* old
+versions of the library.
+
+Anyway, using upstream is highly recommended. ``asyncio_timeout`` exists only for the
+sake of backward compatibility, easy supporting both old and new Python by the same
+code, and easy misgration.
+
+If rescheduling API is not important and only ``async with timeout(...): ...`` functionality is required,
+a user could apply conditional import::
+
+    if sys.version_info >= (3, 11):
+        from asyncio import timeout, timeout_at
+    else:
+        from async_timeout import timeout, timeout_at
+
+
+Usage example
+-------------
+
+
+The context manager is useful in cases when you want to apply timeout
+logic around block of code or in cases when ``asyncio.wait_for()`` is
+not suitable. Also it's much faster than ``asyncio.wait_for()``
+because ``timeout`` doesn't create a new task.
+
+The ``timeout(delay, *, loop=None)`` call returns a context manager
+that cancels a block on *timeout* expiring::
+
+   from async_timeout import timeout
+   async with timeout(1.5):
+       await inner()
+
+1. If ``inner()`` is executed faster than in ``1.5`` seconds nothing
+   happens.
+2. Otherwise ``inner()`` is cancelled internally by sending
+   ``asyncio.CancelledError`` into but ``asyncio.TimeoutError`` is
+   raised outside of context manager scope.
+
+*timeout* parameter could be ``None`` for skipping timeout functionality.
+
+
+Alternatively, ``timeout_at(when)`` can be used for scheduling
+at the absolute time::
+
+   loop = asyncio.get_event_loop()
+   now = loop.time()
+
+   async with timeout_at(now + 1.5):
+       await inner()
+
+
+Please note: it is not POSIX time but a time with
+undefined starting base, e.g. the time of the system power on.
+
+
+Context manager has ``.expired()`` / ``.expired`` for check if timeout happens
+exactly in context manager::
+
+   async with timeout(1.5) as cm:
+       await inner()
+   print(cm.expired())  # recommended api
+   print(cm.expired)    # compatible api
+
+The property is ``True`` if ``inner()`` execution is cancelled by
+timeout context manager.
+
+If ``inner()`` call explicitly raises ``TimeoutError`` ``cm.expired``
+is ``False``.
+
+The scheduled deadline time is available as ``.when()`` / ``.deadline``::
+
+   async with timeout(1.5) as cm:
+       cm.when()    # recommended api
+       cm.deadline  # compatible api
+
+Not finished yet timeout can be rescheduled by ``shift()``
+or ``update()`` methods::
+
+   async with timeout(1.5) as cm:
+       # recommended api
+       cm.reschedule(cm.when() + 1)  # add another second on waiting
+       # compatible api
+       cm.shift(1)  # add another second on waiting
+       cm.update(loop.time() + 5)  # reschedule to now+5 seconds
+
+Rescheduling is forbidden if the timeout is expired or after exit from ``async with``
+code block.
+
+
+Disable scheduled timeout::
+
+   async with timeout(1.5) as cm:
+       cm.reschedule(None)  # recommended api
+       cm.reject()          # compatible api
+
+
+
+Installation
+------------
+
+::
+
+   $ pip install async-timeout
+
+The library is Python 3 only!
+
+
+
+Authors and License
+-------------------
+
+The module is written by Andrew Svetlov.
+
+It's *Apache 2* licensed and freely available.

+ 10 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/RECORD

@@ -0,0 +1,10 @@
+async_timeout-5.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+async_timeout-5.0.1.dist-info/LICENSE,sha256=4Y17uPUT4sRrtYXJS1hb0wcg3TzLId2weG9y0WZY-Sw,568
+async_timeout-5.0.1.dist-info/METADATA,sha256=RVDNEIPYIBJKPsjThJDaKRX1h79-4QYQNuBLSXPItU8,5131
+async_timeout-5.0.1.dist-info/RECORD,,
+async_timeout-5.0.1.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+async_timeout-5.0.1.dist-info/top_level.txt,sha256=9oM4e7Twq8iD_7_Q3Mz0E6GPIB6vJvRFo-UBwUQtBDU,14
+async_timeout-5.0.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+async_timeout/__init__.py,sha256=QF0zpfX1vGmxib7kAqNPm9YehPV0oBVozxJ--Mxq9dI,9186
+async_timeout/__pycache__/__init__.cpython-310.pyc,,
+async_timeout/py.typed,sha256=tyozzRT1fziXETDxokmuyt6jhOmtjUbnVNJdZcG7ik0,12

+ 5 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.3.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+

+ 1 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/top_level.txt

@@ -0,0 +1 @@
+async_timeout

+ 1 - 0
usr/local/lib/python3.10/dist-packages/async_timeout-5.0.1.dist-info/zip-safe

@@ -0,0 +1 @@
+

+ 276 - 0
usr/local/lib/python3.10/dist-packages/async_timeout/__init__.py

@@ -0,0 +1,276 @@
+import asyncio
+import enum
+import sys
+from types import TracebackType
+from typing import Optional, Type, final
+
+
+__version__ = "5.0.1"
+
+
+__all__ = ("timeout", "timeout_at", "Timeout")
+
+
+def timeout(delay: Optional[float]) -> "Timeout":
+    """timeout context manager.
+
+    Useful in cases when you want to apply timeout logic around block
+    of code or in cases when asyncio.wait_for is not suitable. For example:
+
+    >>> async with timeout(0.001):
+    ...     async with aiohttp.get('https://github.com') as r:
+    ...         await r.text()
+
+
+    delay - value in seconds or None to disable timeout logic
+    """
+    loop = asyncio.get_running_loop()
+    if delay is not None:
+        deadline = loop.time() + delay  # type: Optional[float]
+    else:
+        deadline = None
+    return Timeout(deadline, loop)
+
+
+def timeout_at(deadline: Optional[float]) -> "Timeout":
+    """Schedule the timeout at absolute time.
+
+    deadline argument points on the time in the same clock system
+    as loop.time().
+
+    Please note: it is not POSIX time but a time with
+    undefined starting base, e.g. the time of the system power on.
+
+    >>> async with timeout_at(loop.time() + 10):
+    ...     async with aiohttp.get('https://github.com') as r:
+    ...         await r.text()
+
+
+    """
+    loop = asyncio.get_running_loop()
+    return Timeout(deadline, loop)
+
+
+class _State(enum.Enum):
+    INIT = "INIT"
+    ENTER = "ENTER"
+    TIMEOUT = "TIMEOUT"
+    EXIT = "EXIT"
+
+
+if sys.version_info >= (3, 11):
+
+    class _Expired:
+        __slots__ = ("_val",)
+
+        def __init__(self, val: bool) -> None:
+            self._val = val
+
+        def __call__(self) -> bool:
+            return self._val
+
+        def __bool__(self) -> bool:
+            return self._val
+
+        def __repr__(self) -> str:
+            return repr(self._val)
+
+        def __str__(self) -> str:
+            return str(self._val)
+
+    @final
+    class Timeout(asyncio.Timeout):  # type: ignore[misc]
+        # Supports full asyncio.Timeout API.
+        # Also provides several asyncio_timeout specific methods
+        # for backward compatibility.
+        def __init__(
+            self, deadline: Optional[float], loop: asyncio.AbstractEventLoop
+        ) -> None:
+            super().__init__(deadline)
+
+        @property
+        def expired(self) -> _Expired:
+            # a hacky property hat can provide both roles:
+            # timeout.expired()  from asyncio
+            # timeout.expired    from asyncio_timeout
+            return _Expired(super().expired())
+
+        @property
+        def deadline(self) -> Optional[float]:
+            return self.when()
+
+        def reject(self) -> None:
+            """Reject scheduled timeout if any."""
+            # cancel is maybe better name but
+            # task.cancel() raises CancelledError in asyncio world.
+            self.reschedule(None)
+
+        def shift(self, delay: float) -> None:
+            """Advance timeout on delay seconds.
+
+            The delay can be negative.
+
+            Raise RuntimeError if shift is called when deadline is not scheduled
+            """
+            deadline = self.when()
+            if deadline is None:
+                raise RuntimeError("cannot shift timeout if deadline is not scheduled")
+            self.reschedule(deadline + delay)
+
+        def update(self, deadline: float) -> None:
+            """Set deadline to absolute value.
+
+            deadline argument points on the time in the same clock system
+            as loop.time().
+
+            If new deadline is in the past the timeout is raised immediately.
+
+            Please note: it is not POSIX time but a time with
+            undefined starting base, e.g. the time of the system power on.
+            """
+            self.reschedule(deadline)
+
+else:
+
+    @final
+    class Timeout:
+        # Internal class, please don't instantiate it directly
+        # Use timeout() and timeout_at() public factories instead.
+        #
+        # Implementation note: `async with timeout()` is preferred
+        # over `with timeout()`.
+        # While technically the Timeout class implementation
+        # doesn't need to be async at all,
+        # the `async with` statement explicitly points that
+        # the context manager should be used from async function context.
+        #
+        # This design allows to avoid many silly misusages.
+        #
+        # TimeoutError is raised immediately when scheduled
+        # if the deadline is passed.
+        # The purpose is to time out as soon as possible
+        # without waiting for the next await expression.
+
+        __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task")
+
+        def __init__(
+            self, deadline: Optional[float], loop: asyncio.AbstractEventLoop
+        ) -> None:
+            self._loop = loop
+            self._state = _State.INIT
+
+            self._task: Optional["asyncio.Task[object]"] = None
+            self._timeout_handler = None  # type: Optional[asyncio.Handle]
+            if deadline is None:
+                self._deadline = None  # type: Optional[float]
+            else:
+                self.update(deadline)
+
+        async def __aenter__(self) -> "Timeout":
+            self._do_enter()
+            return self
+
+        async def __aexit__(
+            self,
+            exc_type: Optional[Type[BaseException]],
+            exc_val: Optional[BaseException],
+            exc_tb: Optional[TracebackType],
+        ) -> Optional[bool]:
+            self._do_exit(exc_type)
+            return None
+
+        @property
+        def expired(self) -> bool:
+            """Is timeout expired during execution?"""
+            return self._state == _State.TIMEOUT
+
+        @property
+        def deadline(self) -> Optional[float]:
+            return self._deadline
+
+        def reject(self) -> None:
+            """Reject scheduled timeout if any."""
+            # cancel is maybe better name but
+            # task.cancel() raises CancelledError in asyncio world.
+            if self._state not in (_State.INIT, _State.ENTER):
+                raise RuntimeError(f"invalid state {self._state.value}")
+            self._reject()
+
+        def _reject(self) -> None:
+            self._task = None
+            if self._timeout_handler is not None:
+                self._timeout_handler.cancel()
+                self._timeout_handler = None
+
+        def shift(self, delay: float) -> None:
+            """Advance timeout on delay seconds.
+
+            The delay can be negative.
+
+            Raise RuntimeError if shift is called when deadline is not scheduled
+            """
+            deadline = self._deadline
+            if deadline is None:
+                raise RuntimeError("cannot shift timeout if deadline is not scheduled")
+            self.update(deadline + delay)
+
+        def update(self, deadline: float) -> None:
+            """Set deadline to absolute value.
+
+            deadline argument points on the time in the same clock system
+            as loop.time().
+
+            If new deadline is in the past the timeout is raised immediately.
+
+            Please note: it is not POSIX time but a time with
+            undefined starting base, e.g. the time of the system power on.
+            """
+            if self._state == _State.EXIT:
+                raise RuntimeError("cannot reschedule after exit from context manager")
+            if self._state == _State.TIMEOUT:
+                raise RuntimeError("cannot reschedule expired timeout")
+            if self._timeout_handler is not None:
+                self._timeout_handler.cancel()
+            self._deadline = deadline
+            if self._state != _State.INIT:
+                self._reschedule()
+
+        def _reschedule(self) -> None:
+            assert self._state == _State.ENTER
+            deadline = self._deadline
+            if deadline is None:
+                return
+
+            now = self._loop.time()
+            if self._timeout_handler is not None:
+                self._timeout_handler.cancel()
+
+            self._task = asyncio.current_task()
+            if deadline <= now:
+                self._timeout_handler = self._loop.call_soon(self._on_timeout)
+            else:
+                self._timeout_handler = self._loop.call_at(deadline, self._on_timeout)
+
+        def _do_enter(self) -> None:
+            if self._state != _State.INIT:
+                raise RuntimeError(f"invalid state {self._state.value}")
+            self._state = _State.ENTER
+            self._reschedule()
+
+        def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None:
+            if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT:
+                assert self._task is not None
+                self._timeout_handler = None
+                self._task = None
+                raise asyncio.TimeoutError
+            # timeout has not expired
+            self._state = _State.EXIT
+            self._reject()
+            return None
+
+        def _on_timeout(self) -> None:
+            assert self._task is not None
+            self._task.cancel()
+            self._state = _State.TIMEOUT
+            # drop the reference early
+            self._timeout_handler = None

binární
usr/local/lib/python3.10/dist-packages/async_timeout/__pycache__/__init__.cpython-310.pyc


+ 1 - 0
usr/local/lib/python3.10/dist-packages/async_timeout/py.typed

@@ -0,0 +1 @@
+Placeholder

+ 1 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 271 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/METADATA

@@ -0,0 +1,271 @@
+Metadata-Version: 2.4
+Name: redis
+Version: 7.4.0
+Summary: Python client for Redis database and key-value store
+Project-URL: Changes, https://github.com/redis/redis-py/releases
+Project-URL: Code, https://github.com/redis/redis-py
+Project-URL: Documentation, https://redis.readthedocs.io/en/latest/
+Project-URL: Homepage, https://github.com/redis/redis-py
+Project-URL: Issue tracker, https://github.com/redis/redis-py/issues
+Author-email: "Redis Inc." <oss@redis.com>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: Redis,database,key-value-store
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.10
+Requires-Dist: async-timeout>=4.0.3; python_full_version < '3.11.3'
+Provides-Extra: circuit-breaker
+Requires-Dist: pybreaker>=1.4.0; extra == 'circuit-breaker'
+Provides-Extra: hiredis
+Requires-Dist: hiredis>=3.2.0; extra == 'hiredis'
+Provides-Extra: jwt
+Requires-Dist: pyjwt>=2.9.0; extra == 'jwt'
+Provides-Extra: ocsp
+Requires-Dist: cryptography>=36.0.1; extra == 'ocsp'
+Requires-Dist: pyopenssl>=20.0.1; extra == 'ocsp'
+Requires-Dist: requests>=2.31.0; extra == 'ocsp'
+Provides-Extra: otel
+Requires-Dist: opentelemetry-api>=1.39.1; extra == 'otel'
+Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.39.1; extra == 'otel'
+Requires-Dist: opentelemetry-sdk>=1.39.1; extra == 'otel'
+Provides-Extra: xxhash
+Requires-Dist: xxhash~=3.6.0; extra == 'xxhash'
+Description-Content-Type: text/markdown
+
+# redis-py
+
+The Python interface to the Redis key-value store.
+
+[![CI](https://github.com/redis/redis-py/workflows/CI/badge.svg?branch=master)](https://github.com/redis/redis-py/actions?query=workflow%3ACI+branch%3Amaster)
+[![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis.readthedocs.io/en/stable/)
+[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/redis/redis-py/blob/master/LICENSE)
+[![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/)
+[![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases)
+[![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py)
+
+[Installation](#installation) |  [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md)
+
+---------------------------------------------
+
+**Note:** redis-py 5.0 is the last version of redis-py that supports Python 3.7, as it has reached [end of life](https://devguide.python.org/versions/). redis-py 5.1 supports Python 3.8+.<br>
+**Note:** redis-py 6.1.0 is the last version of redis-py that supports Python 3.8, as it has reached [end of life](https://devguide.python.org/versions/). redis-py 6.2.0 supports Python 3.9+.
+
+---------------------------------------------
+
+## How do I Redis?
+
+[Learn for free at Redis University](https://redis.io/learn/university)
+
+[Try the Redis Cloud](https://redis.io/try-free/)
+
+[Dive in developer tutorials](https://redis.io/learn)
+
+[Join the Redis community](https://redis.io/community/)
+
+[Work at Redis](https://redis.io/careers/)
+
+## Installation
+
+Start a redis via docker (for Redis versions >= 8.0):
+
+``` bash
+docker run -p 6379:6379 -it redis:latest
+```
+
+Start a redis via docker (for Redis versions < 8.0):
+
+``` bash
+docker run -p 6379:6379 -it redis/redis-stack:latest
+```
+To install redis-py, simply:
+
+``` bash
+$ pip install redis
+```
+
+For faster performance, install redis with hiredis support, this provides a compiled response parser, and *for most cases* requires zero code changes.
+By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing.
+
+``` bash
+$ pip install "redis[hiredis]"
+```
+
+Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)!
+
+## Supported Redis Versions
+
+The most recent version of this library supports Redis version [7.2](https://github.com/redis/redis/blob/7.2/00-RELEASENOTES), [7.4](https://github.com/redis/redis/blob/7.4/00-RELEASENOTES), [8.0](https://github.com/redis/redis/blob/8.0/00-RELEASENOTES) and [8.2](https://github.com/redis/redis/blob/8.2/00-RELEASENOTES).
+
+The table below highlights version compatibility of the most-recent library versions and redis versions.
+
+| Library version | Supported redis versions |
+|-----------------|-------------------|
+| 3.5.3 | <= 6.2 Family of releases |
+| >= 4.5.0 | Version 5.0 to 7.0 |
+| >= 5.0.0 | Version 5.0 to 7.4 |
+| >= 6.0.0 | Version 7.2 to current |
+
+
+## Usage
+
+### Basic Example
+
+``` python
+>>> import redis
+>>> r = redis.Redis(host='localhost', port=6379, db=0)
+>>> r.set('foo', 'bar')
+True
+>>> r.get('foo')
+b'bar'
+```
+
+The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python, to receive decoded strings, set *decode_responses=True*.  For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html).
+
+
+#### RESP3 Support
+To enable support for RESP3, ensure you have at least version 5.0 of the client, and change your connection object to include *protocol=3*
+
+``` python
+>>> import redis
+>>> r = redis.Redis(host='localhost', port=6379, db=0, protocol=3)
+```
+
+### Connection Pools
+
+By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can however define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools).
+
+``` python
+>>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
+>>> r = redis.Redis(connection_pool=pool)
+```
+
+Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client).
+
+### Redis Commands
+
+There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (i.e. del) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or [the documentation](https://redis.readthedocs.io/en/stable/commands.html).
+
+## Advanced Topics
+
+The [official Redis command documentation](https://redis.io/commands)
+does a great job of explaining each command in detail. redis-py attempts
+to adhere to the official command syntax. There are a few exceptions:
+
+-   **MULTI/EXEC**: These are implemented as part of the Pipeline class.
+    The pipeline is wrapped with the MULTI and EXEC statements by
+    default when it is executed, which can be disabled by specifying
+    transaction=False. See more about Pipelines below.
+
+-   **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as
+    a separate class as it places the underlying connection in a state
+    where it can\'t execute non-pubsub commands. Calling the pubsub
+    method from the Redis client will return a PubSub instance where you
+    can subscribe to channels and listen for messages. You can only call
+    PUBLISH from the Redis client (see [this comment on issue
+    #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015)
+    for details).
+
+For more details, please see the documentation on [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html).
+
+### Pipelines
+
+The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls, by batching Redis commands, and receiving their results as a list.
+
+
+``` python
+>>> pipe = r.pipeline()
+>>> pipe.set('foo', 5)
+>>> pipe.set('bar', 18.5)
+>>> pipe.set('blee', "hello world!")
+>>> pipe.execute()
+[True, True, True]
+```
+
+### PubSub
+
+The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels.
+
+``` python
+>>> r = redis.Redis(...)
+>>> p = r.pubsub()
+>>> p.subscribe('my-first-channel', 'my-second-channel', ...)
+>>> p.get_message()
+{'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
+```
+
+### Redis’ search and query capabilities default dialect
+
+Release 6.0.0 introduces a client-side default dialect for Redis’ search and query capabilities.
+By default, the client now overrides the server-side dialect with version 2, automatically appending *DIALECT 2* to commands like *FT.AGGREGATE* and *FT.SEARCH*.
+
+**Important**: Be aware that the query dialect may impact the results returned. If needed, you can revert to a different dialect version by configuring the client accordingly.
+
+``` python
+>>> from redis.commands.search.field import TextField
+>>> from redis.commands.search.query import Query
+>>> from redis.commands.search.index_definition import IndexDefinition
+>>> import redis
+
+>>> r = redis.Redis(host='localhost', port=6379, db=0)
+>>> r.ft().create_index(
+>>>     (TextField("name"), TextField("lastname")),
+>>>     definition=IndexDefinition(prefix=["test:"]),
+>>> )
+
+>>> r.hset("test:1", "name", "James")
+>>> r.hset("test:1", "lastname", "Brown")
+
+>>> # Query with default DIALECT 2
+>>> query = "@name: James Brown"
+>>> q = Query(query)
+>>> res = r.ft().search(q)
+
+>>> # Query with explicit DIALECT 1
+>>> query = "@name: James Brown"
+>>> q = Query(query).dialect(1)
+>>> res = r.ft().search(q)
+```
+
+You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
+
+### Multi-database client (Active-Active)
+
+The multi-database client allows your application to connect to multiple Redis databases, which are typically replicas of each other. It is designed to work with Redis Software and Redis Cloud Active-Active setups. The client continuously monitors database health, detects failures, and automatically fails over to the next healthy database using a configurable strategy. When the original database becomes healthy again, the client can automatically switch back to it.<br>
+This is useful when:
+
+1. You have more than one Redis deployment. This might include two independent Redis servers or two or more Redis databases replicated across multiple [active-active Redis Enterprise](https://redis.io/docs/latest/operate/rs/databases/active-active/) clusters.
+2. You want your application to connect to one deployment at a time and to fail over to the next available deployment if the first deployment becomes unavailable.
+
+For the complete failover configuration options and examples, see the [Multi-database client docs](https://redis.readthedocs.io/en/latest/multi_database.html).
+
+---------------------------------------------
+
+### Author
+
+redis-py is developed and maintained by [Redis Inc](https://redis.io). It can be found [here](
+https://github.com/redis/redis-py), or downloaded from [pypi](https://pypi.org/project/redis/).
+
+Special thanks to:
+
+-   Andy McCurdy (<sedrik@gmail.com>) the original author of redis-py.
+-   Ludovico Magnocavallo, author of the original Python Redis client,
+    from which some of the socket code is still used.
+-   Alexander Solovyov for ideas on the generic response callback
+    system.
+-   Paul Hubbard for initial packaging support.
+
+[![Redis](https://github.com/redis/redis-py/blob/master/docs/_static/logo-redis.svg)](https://redis.io)

+ 231 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/RECORD

@@ -0,0 +1,231 @@
+redis-7.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+redis-7.4.0.dist-info/METADATA,sha256=PiOIvSLNJFMiYWWVJen2k1WmGWupyfLmJz3VDebjApU,12434
+redis-7.4.0.dist-info/RECORD,,
+redis-7.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis-7.4.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
+redis-7.4.0.dist-info/licenses/LICENSE,sha256=pXslClvwPXr-VbdAYzE_Ktt7ANVGwKsUmok5gzP-PMg,1074
+redis/__init__.py,sha256=tM-0mZ3FMgtoiYnkBu6nW4HMpCCo8WCQK402Tz77QK8,2108
+redis/__pycache__/__init__.cpython-310.pyc,,
+redis/__pycache__/background.cpython-310.pyc,,
+redis/__pycache__/backoff.cpython-310.pyc,,
+redis/__pycache__/cache.cpython-310.pyc,,
+redis/__pycache__/client.cpython-310.pyc,,
+redis/__pycache__/cluster.cpython-310.pyc,,
+redis/__pycache__/connection.cpython-310.pyc,,
+redis/__pycache__/crc.cpython-310.pyc,,
+redis/__pycache__/credentials.cpython-310.pyc,,
+redis/__pycache__/data_structure.cpython-310.pyc,,
+redis/__pycache__/driver_info.cpython-310.pyc,,
+redis/__pycache__/event.cpython-310.pyc,,
+redis/__pycache__/exceptions.cpython-310.pyc,,
+redis/__pycache__/lock.cpython-310.pyc,,
+redis/__pycache__/maint_notifications.cpython-310.pyc,,
+redis/__pycache__/ocsp.cpython-310.pyc,,
+redis/__pycache__/retry.cpython-310.pyc,,
+redis/__pycache__/sentinel.cpython-310.pyc,,
+redis/__pycache__/typing.cpython-310.pyc,,
+redis/__pycache__/utils.cpython-310.pyc,,
+redis/_parsers/__init__.py,sha256=gyf5dp918NuJAkWFl8sX1Z-qAvbX_40-_7YCTM6Rvjc,693
+redis/_parsers/__pycache__/__init__.cpython-310.pyc,,
+redis/_parsers/__pycache__/base.cpython-310.pyc,,
+redis/_parsers/__pycache__/commands.cpython-310.pyc,,
+redis/_parsers/__pycache__/encoders.cpython-310.pyc,,
+redis/_parsers/__pycache__/helpers.cpython-310.pyc,,
+redis/_parsers/__pycache__/hiredis.cpython-310.pyc,,
+redis/_parsers/__pycache__/resp2.cpython-310.pyc,,
+redis/_parsers/__pycache__/resp3.cpython-310.pyc,,
+redis/_parsers/__pycache__/socket.cpython-310.pyc,,
+redis/_parsers/base.py,sha256=aQnmeAxdo9a53ZMGLLuHjcE3O0TQ0lePI_TQmO4APlU,20226
+redis/_parsers/commands.py,sha256=Msq6XT5-CiWxfK02gcTNvETdsQC5QquolU8vLrDEzsQ,28493
+redis/_parsers/encoders.py,sha256=X0jvTp-E4TZUlZxV5LJJ88TuVrF1vly5tuC0xjxGaSc,1734
+redis/_parsers/helpers.py,sha256=1MdEMVaOBWdWv398HZCbOpSVy3wWURcRmO-MYI38Ubw,31457
+redis/_parsers/hiredis.py,sha256=YS34r4viJQmkkT1bbYxL-AzqT4QNKaAUG2qTHCXpFso,11068
+redis/_parsers/resp2.py,sha256=f22kH-_ZP2iNtOn6xOe65MSy_fJpu8OEn1u_hgeeojI,4813
+redis/_parsers/resp3.py,sha256=X8YPoXJZVSPQ6L_0lVKK6bak4z-kwPdmKVPQ1DeEDUc,10406
+redis/_parsers/socket.py,sha256=7t6MCJrvknjMqGSCn7ENKBOSBwr1CUPFhFB8R0UUjhc,5397
+redis/asyncio/__init__.py,sha256=uoDD8XYVi0Kj6mcufYwLDUTQXmBRx7a0bhKF9stZr7I,1489
+redis/asyncio/__pycache__/__init__.cpython-310.pyc,,
+redis/asyncio/__pycache__/client.cpython-310.pyc,,
+redis/asyncio/__pycache__/cluster.cpython-310.pyc,,
+redis/asyncio/__pycache__/connection.cpython-310.pyc,,
+redis/asyncio/__pycache__/lock.cpython-310.pyc,,
+redis/asyncio/__pycache__/retry.cpython-310.pyc,,
+redis/asyncio/__pycache__/sentinel.cpython-310.pyc,,
+redis/asyncio/__pycache__/utils.cpython-310.pyc,,
+redis/asyncio/client.py,sha256=K6RjpOZ5aXacAkwnItXkbmj3D9Db4bxK4xHpid3OhQI,75295
+redis/asyncio/cluster.py,sha256=9VCdJuk3Ja0vF-QkaTYroTWfu6O_Q51yOKakGd15T3A,114280
+redis/asyncio/connection.py,sha256=iLNQ1yhlR_Z-GX0w5WJRX0ODfwFPfyEico8XZeA7NtU,63681
+redis/asyncio/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/asyncio/http/__pycache__/__init__.cpython-310.pyc,,
+redis/asyncio/http/__pycache__/http_client.cpython-310.pyc,,
+redis/asyncio/http/http_client.py,sha256=wftF-Yl4LAcBNkxy62HM2x5OSmpfEz6qxBFM-zft9rU,7947
+redis/asyncio/lock.py,sha256=WRLtHNAxNwUzZE1A7xQdw-05q_49emS69WkZ_NvVw7E,13249
+redis/asyncio/multidb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/asyncio/multidb/__pycache__/__init__.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/client.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/command_executor.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/config.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/database.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/event.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/failover.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/failure_detector.cpython-310.pyc,,
+redis/asyncio/multidb/__pycache__/healthcheck.cpython-310.pyc,,
+redis/asyncio/multidb/client.py,sha256=5aB1RWpebtmSTkqqTFEhfhXUZCKoVnjvXy3RK5peVfM,21479
+redis/asyncio/multidb/command_executor.py,sha256=IGtRQKBQXrDDo_zDb1RVwXhANB9DH9ns9VsuzAZfY3s,12390
+redis/asyncio/multidb/config.py,sha256=B7WBKL8N0MuagX4RgqiGWoi-_umuCbINz8-FynL1JIk,9662
+redis/asyncio/multidb/database.py,sha256=HRsi6XFtY5nXpQcpze2Khn7k2dsnlZ0b3SeO-AcEHaA,1931
+redis/asyncio/multidb/event.py,sha256=76GS22NwkeMVwtjr13UMeYD0pAB9KoPDvKWUin4DwHs,2788
+redis/asyncio/multidb/failover.py,sha256=SEhlG2rA50Mz-Rk-W5l_wOREHmKQMDyxSRiSAt1NmMI,3635
+redis/asyncio/multidb/failure_detector.py,sha256=1nipBfcjtLH8XprTQvOBE9y0kRC_YsPszEPzLKbiSU8,1263
+redis/asyncio/multidb/healthcheck.py,sha256=py0o7acCyUn_CDcHznZggI4YTLM8SOIgS1fywY5_krU,17774
+redis/asyncio/observability/__init__.py,sha256=zcDNVo9LkIDYOL8fJjX244Gxzsq1BbnnsadoZZW9HR0,439
+redis/asyncio/observability/__pycache__/__init__.cpython-310.pyc,,
+redis/asyncio/observability/__pycache__/recorder.cpython-310.pyc,,
+redis/asyncio/observability/recorder.py,sha256=TZQH6LJkdexaOXsaqxzHxBOO3F2FbIKhpeHIdkmSH8U,18234
+redis/asyncio/retry.py,sha256=VYQsSri88aMc3xFavcNRe4FGSq5gQf_70LR-ejLB_YU,2515
+redis/asyncio/sentinel.py,sha256=Ppk-jlTubcHpa0lvinZ1pPTtQ5rFHXZkkaCZ7G_TCQs,14868
+redis/asyncio/utils.py,sha256=MkTImQ9SFALFhUhwVbvu26B1RAY8KcgSlpVswz1QlIs,744
+redis/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/auth/__pycache__/__init__.cpython-310.pyc,,
+redis/auth/__pycache__/err.cpython-310.pyc,,
+redis/auth/__pycache__/idp.cpython-310.pyc,,
+redis/auth/__pycache__/token.cpython-310.pyc,,
+redis/auth/__pycache__/token_manager.cpython-310.pyc,,
+redis/auth/err.py,sha256=WYkbuDIzwp1S-eAvsya6QMlO6g9QIXbzMITOsTWX0xk,694
+redis/auth/idp.py,sha256=IMDIIb9q72vbIwtFN8vPdaAKZVTdh0HuC5uj5ufqmw4,631
+redis/auth/token.py,sha256=qYwAgxFW3S93QDUqp1BTsj7Pj9ZohnixGeOX0s7AsjY,3317
+redis/auth/token_manager.py,sha256=puLk9Y0E-UiCmrhx0Sj63c-BE-9hd5MvL0OzsbskAgw,12388
+redis/background.py,sha256=Bs2fOOZpsWwHgJ5W7XGDc1ePvMhPc6KM5Yj-Rn_N0YM,16982
+redis/backoff.py,sha256=tQM6Lh2g2FjMH8iXg94br2sU9eri4mEW9FbOrMt0azs,5285
+redis/cache.py,sha256=yfjWND74f_O4zOWcekd-r9byjNwRk9PAN7eepA9J_YE,12089
+redis/client.py,sha256=9wZ00pRg3aFf8FoaWyn_gMfps3I-MZ4l4a7tkGaWg_I,77481
+redis/cluster.py,sha256=e5eltRy86htC-gzA3tsh2_IHt9pMARCJwExnC89vBbc,160712
+redis/commands/__init__.py,sha256=cTUH-MGvaLYS0WuoytyqtN1wniw2A1KbkUXcpvOSY3I,576
+redis/commands/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/__pycache__/cluster.cpython-310.pyc,,
+redis/commands/__pycache__/core.cpython-310.pyc,,
+redis/commands/__pycache__/helpers.cpython-310.pyc,,
+redis/commands/__pycache__/policies.cpython-310.pyc,,
+redis/commands/__pycache__/redismodules.cpython-310.pyc,,
+redis/commands/__pycache__/sentinel.cpython-310.pyc,,
+redis/commands/bf/__init__.py,sha256=qk4DA9KsMiP4WYqYeP1T5ScBwctsVtlLyMhrYIyq1Zc,8019
+redis/commands/bf/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/bf/__pycache__/commands.cpython-310.pyc,,
+redis/commands/bf/__pycache__/info.cpython-310.pyc,,
+redis/commands/bf/commands.py,sha256=xeKt8E7G8HB-l922J0DLg07CEIZTVNGx_2Lfyw1gIck,21283
+redis/commands/bf/info.py,sha256=_OB2v_hAPI9mdVNiBx8jUtH2MhMoct9ZRm-e8In6wQo,3355
+redis/commands/cluster.py,sha256=bogou-d_eDvCn2WV1QGCtfZ8efjCqf6Hok4VK_YdMMU,40782
+redis/commands/core.py,sha256=2DJ41Gwol5oMBXhkEi3nOl4_q8o3h7qjTQGPDJqrwrk,262603
+redis/commands/helpers.py,sha256=ZfSLkYlJLVMeRbMpudQ8zs4D51zQ1ZJbSHUh-N-mhiE,3152
+redis/commands/json/__init__.py,sha256=bznXhLYR652rfLfLp8cz0ZN0Yr8IRx4FgON_tq9_2Io,4845
+redis/commands/json/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/json/__pycache__/_util.cpython-310.pyc,,
+redis/commands/json/__pycache__/commands.cpython-310.pyc,,
+redis/commands/json/__pycache__/decoders.cpython-310.pyc,,
+redis/commands/json/__pycache__/path.cpython-310.pyc,,
+redis/commands/json/_util.py,sha256=hIBQ1TLCTgUifcLsg0x8kJlecxmXhA9I0zMnHlQk0Ho,137
+redis/commands/json/commands.py,sha256=oeVUhjSAoKEXqKV_JDYHp5xLND073U3HQfyZdaNTzqc,15711
+redis/commands/json/decoders.py,sha256=a_IoMV_wgeJyUifD4P6HTcM9s6FhricwmzQcZRmc-Gw,1411
+redis/commands/json/path.py,sha256=0zaO6_q_FVMk1Bkhkb7Wcr8AF2Tfr69VhkKy1IBVhpA,393
+redis/commands/policies.py,sha256=RjRVeTAuXoZeMgTPW6tPBSZhyD9PMHXN29815MrcSX0,11267
+redis/commands/redismodules.py,sha256=-kLM4RBklDhNh-MXCra81ZTSstIQ-ulRab6v0dYUTdA,2573
+redis/commands/search/__init__.py,sha256=Mm5nbdPoJLnQFjaRkWp3HWPgXjKYrDnlAcCpiAlh4r0,5841
+redis/commands/search/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/search/__pycache__/_util.cpython-310.pyc,,
+redis/commands/search/__pycache__/aggregation.cpython-310.pyc,,
+redis/commands/search/__pycache__/commands.cpython-310.pyc,,
+redis/commands/search/__pycache__/dialect.cpython-310.pyc,,
+redis/commands/search/__pycache__/document.cpython-310.pyc,,
+redis/commands/search/__pycache__/field.cpython-310.pyc,,
+redis/commands/search/__pycache__/hybrid_query.cpython-310.pyc,,
+redis/commands/search/__pycache__/hybrid_result.cpython-310.pyc,,
+redis/commands/search/__pycache__/index_definition.cpython-310.pyc,,
+redis/commands/search/__pycache__/profile_information.cpython-310.pyc,,
+redis/commands/search/__pycache__/query.cpython-310.pyc,,
+redis/commands/search/__pycache__/querystring.cpython-310.pyc,,
+redis/commands/search/__pycache__/reducers.cpython-310.pyc,,
+redis/commands/search/__pycache__/result.cpython-310.pyc,,
+redis/commands/search/__pycache__/suggestion.cpython-310.pyc,,
+redis/commands/search/_util.py,sha256=9Mp72OO5Ib5UbfN7uXb-iB7hQCm1jQLV90ms2P9XSGU,219
+redis/commands/search/aggregation.py,sha256=fPIpcUj_z1u6rsulGgFpgMDA0EhWUVjIJW0j466GH6I,11578
+redis/commands/search/commands.py,sha256=DiqBSJXhQsTqBXBqHpxycvA2igoTLs9CB64e5beqgDY,44070
+redis/commands/search/dialect.py,sha256=-7M6kkr33x0FkMtKmUsbeRAE6qxLUbqdJCqIo0UKIXo,105
+redis/commands/search/document.py,sha256=g2R-PRgq-jN33_GLXzavvse4cpIHBMfjPfPK7tnE9Gc,413
+redis/commands/search/field.py,sha256=KQFKCGVaABn9vDYnAcB0jaMwGxJqiZ8fEJHP_VieBR8,5935
+redis/commands/search/hybrid_query.py,sha256=g9acLqi9eHuX5N14yR1q7j8-4m6qOYCHcI5D1Tq8qF4,13270
+redis/commands/search/hybrid_result.py,sha256=1VQIwVxi5LA99l8r-mfbEifR03ZswYlvOPPoMM_6UiY,815
+redis/commands/search/index_definition.py,sha256=VL2CMzjxN0HEIaTn88evnHX1fCEmytbik4vAmiiYSC8,2489
+redis/commands/search/profile_information.py,sha256=w9SbMiHbcZ1TpsZMe8cMIyO1hGkm5GhnZ_Gqg1feLtc,249
+redis/commands/search/query.py,sha256=9-CCxjakf53BowKLRgLdAhZIZXlWZRjT3bfVyudkGFw,12361
+redis/commands/search/querystring.py,sha256=dE577kOqkCErNgO-IXI4xFVHI8kQE-JiH5ZRI_CKjHE,7597
+redis/commands/search/reducers.py,sha256=Scceylx8BjyqS-TJOdhNW63n6tecL9ojt4U5Sqho5UY,4220
+redis/commands/search/result.py,sha256=iuqmwOeCNo_7N4a_YxxDzVdOTpbwfF1T2uuq5sTqzMo,2624
+redis/commands/search/suggestion.py,sha256=V_re6suDCoNc0ETn_P1t51FeK4pCamPwxZRxCY8jscE,1612
+redis/commands/sentinel.py,sha256=Q1Xuw7qXA0YRZXGlIKsuOtah8UfF0QnkLywOTRvjiMY,5299
+redis/commands/timeseries/__init__.py,sha256=k492_xE_lBD0cVSX82TWBiNxOWuDDrrVZUjINi3LZSc,3450
+redis/commands/timeseries/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/timeseries/__pycache__/commands.cpython-310.pyc,,
+redis/commands/timeseries/__pycache__/info.cpython-310.pyc,,
+redis/commands/timeseries/__pycache__/utils.cpython-310.pyc,,
+redis/commands/timeseries/commands.py,sha256=PxEvOBGKmSFqVmN_1rFllnadoL47Eu2MvQGVjm2u9vY,47291
+redis/commands/timeseries/info.py,sha256=meZYdu7IV9KaUWMKZs9qW4vo3Q9MwhdY-EBtKQzls5o,3223
+redis/commands/timeseries/utils.py,sha256=NLwSOS5Dz9N8dYQSzEyBIvrItOWwfQ0xgDj8un6x3dU,1319
+redis/commands/vectorset/__init__.py,sha256=w2TWc5lCb674jZv8GP9dxYSTGP1yq15ZkF9075nJiIs,1322
+redis/commands/vectorset/__pycache__/__init__.cpython-310.pyc,,
+redis/commands/vectorset/__pycache__/commands.cpython-310.pyc,,
+redis/commands/vectorset/__pycache__/utils.cpython-310.pyc,,
+redis/commands/vectorset/commands.py,sha256=u8mAmXqgnx9zgoAaS0iKn80rRQnS1Z7MpOrM5Z1NQak,14872
+redis/commands/vectorset/utils.py,sha256=kApyWTzG_HEgTj6wSzBuMVz-qWMhPSgr-Do_5cHSS6E,4472
+redis/connection.py,sha256=UhDvzDdvzG8qZe9d-OUf9WZb5U0ApUYvjLDGO3fVlGQ,136420
+redis/crc.py,sha256=Z3kXFtkY2LdgefnQMud1xr4vG5UYvA9LCMqNMX1ywu4,729
+redis/credentials.py,sha256=GOnO3-LSW34efHaIrUbS742Mw8l70mRzF6UrKiKZsMY,1828
+redis/data_structure.py,sha256=qTZq3s7gEmZVwyFBNfKnkKnm9q3-HHxnZfMH9sBIyD4,2527
+redis/driver_info.py,sha256=LB9IPaMsSc5HwApJlZTc5UpdvUfKwC9z8HGifIqPXuw,5488
+redis/event.py,sha256=Y7RE-t6VNGe1eomorEgx_1c8JsLj3M0u0MpyXkkivPM,15137
+redis/exceptions.py,sha256=dQX37Qas5GJyJ-ghv-95sTllZyCrnqxyl7Xa43C7H5Y,8318
+redis/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/http/__pycache__/__init__.cpython-310.pyc,,
+redis/http/__pycache__/http_client.cpython-310.pyc,,
+redis/http/http_client.py,sha256=7pjty24rIlrnSfedHx4X89JTL6xZtOHjDCvLjUkNq4Q,15179
+redis/lock.py,sha256=GrvPSxaOqKo7iAL2oi5ZUEPsOkxAXHVE_Tp1ejgO2fY,12760
+redis/maint_notifications.py,sha256=WHMOsZxEsfdQyn1d-x5ufy3VNywdR8aJ8vTKFkEUsFc,45738
+redis/multidb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/multidb/__pycache__/__init__.cpython-310.pyc,,
+redis/multidb/__pycache__/circuit.cpython-310.pyc,,
+redis/multidb/__pycache__/client.cpython-310.pyc,,
+redis/multidb/__pycache__/command_executor.cpython-310.pyc,,
+redis/multidb/__pycache__/config.cpython-310.pyc,,
+redis/multidb/__pycache__/database.cpython-310.pyc,,
+redis/multidb/__pycache__/event.cpython-310.pyc,,
+redis/multidb/__pycache__/exception.cpython-310.pyc,,
+redis/multidb/__pycache__/failover.cpython-310.pyc,,
+redis/multidb/__pycache__/failure_detector.cpython-310.pyc,,
+redis/multidb/circuit.py,sha256=M3VHfRfIzIIDrrYURi-qy4d-IgK44mBq8E03aWhTXtM,3856
+redis/multidb/client.py,sha256=Gpd850lk7QomDL3yKaEjP4ue4Z_MuRvsISqgl0i57hM,22909
+redis/multidb/command_executor.py,sha256=CSo8vG8EPRcJKwa8wlcvZJvKjX6fbgoqCMvzNP18wQY,12402
+redis/multidb/config.py,sha256=0cL7hy2fMEhvaIqPmVbz_u_eY3wOOzieVlojLzE1dKE,9892
+redis/multidb/database.py,sha256=Z39gpgcwbxq7eDUIApJ6OZTGR7NJ6-0ASBW1on36QgA,3665
+redis/multidb/event.py,sha256=91-8eBGXM5vD_YpQg4lqVHQBf4ZnPnHj-xdZmxft5LI,2978
+redis/multidb/exception.py,sha256=UDl3hN6C9VbHStofBNneILm9AJFZSjYVE9PZGMaT6Nk,633
+redis/multidb/failover.py,sha256=gpbfojRrUiHEedyeJpfruU3qTS01PUSqrVx8ACCDduU,3575
+redis/multidb/failure_detector.py,sha256=hwuEHAMgKLTRjQHKIHtj6iNnt533uzt-6qRieuYB3jE,3788
+redis/observability/__init__.py,sha256=zfh3E8VXMDHk3p-487ThO2sNCCX7TpuGE1F_Cdv990c,694
+redis/observability/__pycache__/__init__.cpython-310.pyc,,
+redis/observability/__pycache__/attributes.cpython-310.pyc,,
+redis/observability/__pycache__/config.cpython-310.pyc,,
+redis/observability/__pycache__/metrics.cpython-310.pyc,,
+redis/observability/__pycache__/providers.cpython-310.pyc,,
+redis/observability/__pycache__/recorder.cpython-310.pyc,,
+redis/observability/__pycache__/registry.cpython-310.pyc,,
+redis/observability/attributes.py,sha256=kc7l9LyIwjGclvroiJlGxPx5_oeS1o-kykpG_SuiXqI,13669
+redis/observability/config.py,sha256=lh1kNcNiEkhL1smEoHjGyaF4dsj2sD1kYJO0fWL8VEM,6515
+redis/observability/metrics.py,sha256=faQsUZwz2a4OC-Nq6W_fHPI2a3IEqQpGpg7dOKcqSh8,24377
+redis/observability/providers.py,sha256=6ZN1-cRmqWZ7uXRAVcDIAjExga6pUtnJD77EbVXOnI8,12287
+redis/observability/recorder.py,sha256=g2dYyEz4KZ460h3bIKz3AKCwEdWC_3Cfu29zCgofHNM,25630
+redis/observability/registry.py,sha256=nBFF2ebDrhfhdoqYzaEwrKKbZ2toOkm_lpyirDF7ZoY,2088
+redis/ocsp.py,sha256=teYSmKnCtk6B3jJLdNYbZN4OE0mxgspt2zUPbkIQzio,11452
+redis/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+redis/retry.py,sha256=ZVRvK6XzK3UQGd93DHS8zuKcQ1rKd7ZkzwuCHEyvdD0,3987
+redis/sentinel.py,sha256=DP1XtO1HRemZMamC1TFHg_hBJRv9eoQgTMlZfPYRUo8,15013
+redis/typing.py,sha256=8ZkSG2G36Nh-oFLMH7Vife2oUWQAFCBFTtG1kozm4Ck,2057
+redis/utils.py,sha256=KjcnTOVwXFEUzV5QXs8Muk1Nf_pDKFQ8JRray_aeazs,14139

+ 0 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/REQUESTED


+ 4 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/WHEEL

@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.29.0
+Root-Is-Purelib: true
+Tag: py3-none-any

+ 21 - 0
usr/local/lib/python3.10/dist-packages/redis-7.4.0.dist-info/licenses/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022-2023, Redis, inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 91 - 0
usr/local/lib/python3.10/dist-packages/redis/__init__.py

@@ -0,0 +1,91 @@
+from redis import asyncio  # noqa
+from redis.backoff import default_backoff
+from redis.client import Redis, StrictRedis
+from redis.driver_info import DriverInfo
+from redis.cluster import RedisCluster
+from redis.connection import (
+    BlockingConnectionPool,
+    Connection,
+    ConnectionPool,
+    SSLConnection,
+    UnixDomainSocketConnection,
+)
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
+from redis.exceptions import (
+    AuthenticationError,
+    AuthenticationWrongNumberOfArgsError,
+    BusyLoadingError,
+    ChildDeadlockedError,
+    ConnectionError,
+    CrossSlotTransactionError,
+    DataError,
+    InvalidPipelineStack,
+    InvalidResponse,
+    MaxConnectionsError,
+    OutOfMemoryError,
+    PubSubError,
+    ReadOnlyError,
+    RedisClusterException,
+    RedisError,
+    ResponseError,
+    TimeoutError,
+    WatchError,
+)
+from redis.sentinel import (
+    Sentinel,
+    SentinelConnectionPool,
+    SentinelManagedConnection,
+    SentinelManagedSSLConnection,
+)
+from redis.utils import from_url
+
+
+def int_or_str(value):
+    try:
+        return int(value)
+    except ValueError:
+        return value
+
+
+__version__ = "7.4.0"
+
+VERSION = tuple(map(int_or_str, __version__.split(".")))
+
+
+__all__ = [
+    "AuthenticationError",
+    "AuthenticationWrongNumberOfArgsError",
+    "BlockingConnectionPool",
+    "BusyLoadingError",
+    "ChildDeadlockedError",
+    "Connection",
+    "ConnectionError",
+    "ConnectionPool",
+    "CredentialProvider",
+    "CrossSlotTransactionError",
+    "DataError",
+    "DriverInfo",
+    "from_url",
+    "default_backoff",
+    "InvalidPipelineStack",
+    "InvalidResponse",
+    "MaxConnectionsError",
+    "OutOfMemoryError",
+    "PubSubError",
+    "ReadOnlyError",
+    "Redis",
+    "RedisCluster",
+    "RedisClusterException",
+    "RedisError",
+    "ResponseError",
+    "Sentinel",
+    "SentinelConnectionPool",
+    "SentinelManagedConnection",
+    "SentinelManagedSSLConnection",
+    "SSLConnection",
+    "UsernamePasswordCredentialProvider",
+    "StrictRedis",
+    "TimeoutError",
+    "UnixDomainSocketConnection",
+    "WatchError",
+]

binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/__init__.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/background.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/backoff.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/cache.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/client.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/cluster.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/connection.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/crc.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/credentials.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/data_structure.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/driver_info.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/event.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/exceptions.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/lock.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/maint_notifications.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/ocsp.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/retry.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/sentinel.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/typing.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/__pycache__/utils.cpython-310.pyc


+ 27 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/__init__.py

@@ -0,0 +1,27 @@
+from .base import (
+    AsyncPushNotificationsParser,
+    BaseParser,
+    PushNotificationsParser,
+    _AsyncRESPBase,
+)
+from .commands import AsyncCommandsParser, CommandsParser
+from .encoders import Encoder
+from .hiredis import _AsyncHiredisParser, _HiredisParser
+from .resp2 import _AsyncRESP2Parser, _RESP2Parser
+from .resp3 import _AsyncRESP3Parser, _RESP3Parser
+
+__all__ = [
+    "AsyncCommandsParser",
+    "_AsyncHiredisParser",
+    "_AsyncRESPBase",
+    "_AsyncRESP2Parser",
+    "_AsyncRESP3Parser",
+    "AsyncPushNotificationsParser",
+    "CommandsParser",
+    "Encoder",
+    "BaseParser",
+    "_HiredisParser",
+    "_RESP2Parser",
+    "_RESP3Parser",
+    "PushNotificationsParser",
+]

binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/__init__.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/base.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/commands.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/encoders.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/helpers.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/hiredis.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/resp2.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/resp3.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/_parsers/__pycache__/socket.cpython-310.pyc


+ 565 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/base.py

@@ -0,0 +1,565 @@
+import logging
+import sys
+from abc import ABC
+from asyncio import IncompleteReadError, StreamReader, TimeoutError
+from typing import Awaitable, Callable, List, Optional, Protocol, Union
+
+from redis.maint_notifications import (
+    MaintenanceNotification,
+    NodeFailedOverNotification,
+    NodeFailingOverNotification,
+    NodeMigratedNotification,
+    NodeMigratingNotification,
+    NodeMovingNotification,
+    OSSNodeMigratedNotification,
+    OSSNodeMigratingNotification,
+)
+from redis.utils import safe_str
+
+if sys.version_info.major >= 3 and sys.version_info.minor >= 11:
+    from asyncio import timeout as async_timeout
+else:
+    from async_timeout import timeout as async_timeout
+
+from ..exceptions import (
+    AskError,
+    AuthenticationError,
+    AuthenticationWrongNumberOfArgsError,
+    BusyLoadingError,
+    ClusterCrossSlotError,
+    ClusterDownError,
+    ConnectionError,
+    ExecAbortError,
+    ExternalAuthProviderError,
+    MasterDownError,
+    ModuleError,
+    MovedError,
+    NoPermissionError,
+    NoScriptError,
+    OutOfMemoryError,
+    ReadOnlyError,
+    ResponseError,
+    TryAgainError,
+)
+from ..typing import EncodableT
+from .encoders import Encoder
+from .socket import SERVER_CLOSED_CONNECTION_ERROR, SocketBuffer
+
+MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
+NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
+MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
+MODULE_EXPORTS_DATA_TYPES_ERROR = (
+    "Error unloading module: the module "
+    "exports one or more module-side data "
+    "types, can't unload"
+)
+# user send an AUTH cmd to a server without authorization configured
+NO_AUTH_SET_ERROR = {
+    # Redis >= 6.0
+    "AUTH <password> called without any password "
+    "configured for the default user. Are you sure "
+    "your configuration is correct?": AuthenticationError,
+    # Redis < 6.0
+    "Client sent AUTH, but no password is set": AuthenticationError,
+}
+
+EXTERNAL_AUTH_PROVIDER_ERROR = {
+    "problem with LDAP service": ExternalAuthProviderError,
+}
+
+logger = logging.getLogger(__name__)
+
+
+class BaseParser(ABC):
+    EXCEPTION_CLASSES = {
+        "ERR": {
+            "max number of clients reached": ConnectionError,
+            "invalid password": AuthenticationError,
+            # some Redis server versions report invalid command syntax
+            # in lowercase
+            "wrong number of arguments "
+            "for 'auth' command": AuthenticationWrongNumberOfArgsError,
+            # some Redis server versions report invalid command syntax
+            # in uppercase
+            "wrong number of arguments "
+            "for 'AUTH' command": AuthenticationWrongNumberOfArgsError,
+            MODULE_LOAD_ERROR: ModuleError,
+            MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
+            NO_SUCH_MODULE_ERROR: ModuleError,
+            MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
+            **NO_AUTH_SET_ERROR,
+            **EXTERNAL_AUTH_PROVIDER_ERROR,
+        },
+        "OOM": OutOfMemoryError,
+        "WRONGPASS": AuthenticationError,
+        "EXECABORT": ExecAbortError,
+        "LOADING": BusyLoadingError,
+        "NOSCRIPT": NoScriptError,
+        "READONLY": ReadOnlyError,
+        "NOAUTH": AuthenticationError,
+        "NOPERM": NoPermissionError,
+        "ASK": AskError,
+        "TRYAGAIN": TryAgainError,
+        "MOVED": MovedError,
+        "CLUSTERDOWN": ClusterDownError,
+        "CROSSSLOT": ClusterCrossSlotError,
+        "MASTERDOWN": MasterDownError,
+    }
+
+    @classmethod
+    def parse_error(cls, response):
+        "Parse an error response"
+        error_code = response.split(" ")[0]
+        if error_code in cls.EXCEPTION_CLASSES:
+            response = response[len(error_code) + 1 :]
+            exception_class = cls.EXCEPTION_CLASSES[error_code]
+            if isinstance(exception_class, dict):
+                exception_class = exception_class.get(response, ResponseError)
+            return exception_class(response, status_code=error_code)
+        return ResponseError(response)
+
+    def on_disconnect(self):
+        raise NotImplementedError()
+
+    def on_connect(self, connection):
+        raise NotImplementedError()
+
+
+class _RESPBase(BaseParser):
+    """Base class for sync-based resp parsing"""
+
+    def __init__(self, socket_read_size):
+        self.socket_read_size = socket_read_size
+        self.encoder = None
+        self._sock = None
+        self._buffer = None
+
+    def __del__(self):
+        try:
+            self.on_disconnect()
+        except Exception:
+            pass
+
+    def on_connect(self, connection):
+        "Called when the socket connects"
+        self._sock = connection._sock
+        self._buffer = SocketBuffer(
+            self._sock, self.socket_read_size, connection.socket_timeout
+        )
+        self.encoder = connection.encoder
+
+    def on_disconnect(self):
+        "Called when the socket disconnects"
+        self._sock = None
+        if self._buffer is not None:
+            self._buffer.close()
+            self._buffer = None
+        self.encoder = None
+
+    def can_read(self, timeout):
+        return self._buffer and self._buffer.can_read(timeout)
+
+
+class AsyncBaseParser(BaseParser):
+    """Base parsing class for the python-backed async parser"""
+
+    __slots__ = "_stream", "_read_size"
+
+    def __init__(self, socket_read_size: int):
+        self._stream: Optional[StreamReader] = None
+        self._read_size = socket_read_size
+
+    async def can_read_destructive(self) -> bool:
+        raise NotImplementedError()
+
+    async def read_response(
+        self, disable_decoding: bool = False
+    ) -> Union[EncodableT, ResponseError, None, List[EncodableT]]:
+        raise NotImplementedError()
+
+
+class MaintenanceNotificationsParser:
+    """Protocol defining maintenance push notification parsing functionality"""
+
+    @staticmethod
+    def parse_oss_maintenance_start_msg(response):
+        # Expected message format is:
+        # SMIGRATING <seq_number> <slot, range1-range2,...>
+        id = response[1]
+        slots = safe_str(response[2])
+        return OSSNodeMigratingNotification(id, slots)
+
+    @staticmethod
+    def parse_oss_maintenance_completed_msg(response):
+        # Expected message format is:
+        # SMIGRATED <seq_number> [[<src_host:port> <dest_host:port> <slot_range>], ...]
+        id = response[1]
+        nodes_to_slots_mapping_data = response[2]
+        # Build the nodes_to_slots_mapping dict structure:
+        # {
+        #     "src_host:port": [
+        #         {"dest_host:port": "slot_range"},
+        #         ...
+        #     ],
+        #     ...
+        # }
+        nodes_to_slots_mapping = {}
+        for src_node, dest_node, slots in nodes_to_slots_mapping_data:
+            src_node_str = safe_str(src_node)
+            dest_node_str = safe_str(dest_node)
+            slots_str = safe_str(slots)
+
+            if src_node_str not in nodes_to_slots_mapping:
+                nodes_to_slots_mapping[src_node_str] = []
+            nodes_to_slots_mapping[src_node_str].append({dest_node_str: slots_str})
+
+        return OSSNodeMigratedNotification(id, nodes_to_slots_mapping)
+
+    @staticmethod
+    def parse_maintenance_start_msg(response, notification_type):
+        # Expected message format is: <notification_type> <seq_number> <time>
+        # Examples:
+        # MIGRATING 1 10
+        # FAILING_OVER 2 20
+        id = response[1]
+        ttl = response[2]
+        return notification_type(id, ttl)
+
+    @staticmethod
+    def parse_maintenance_completed_msg(response, notification_type):
+        # Expected message format is: <notification_type> <seq_number>
+        # Examples:
+        # MIGRATED 1
+        # FAILED_OVER 2
+        id = response[1]
+        return notification_type(id)
+
+    @staticmethod
+    def parse_moving_msg(response):
+        # Expected message format is: MOVING <seq_number> <time> <endpoint>
+        id = response[1]
+        ttl = response[2]
+        if response[3] is None:
+            host, port = None, None
+        else:
+            value = safe_str(response[3])
+            host, port = value.split(":")
+            port = int(port) if port is not None else None
+
+        return NodeMovingNotification(id, host, port, ttl)
+
+
+_INVALIDATION_MESSAGE = "invalidate"
+_MOVING_MESSAGE = "MOVING"
+_MIGRATING_MESSAGE = "MIGRATING"
+_MIGRATED_MESSAGE = "MIGRATED"
+_FAILING_OVER_MESSAGE = "FAILING_OVER"
+_FAILED_OVER_MESSAGE = "FAILED_OVER"
+_SMIGRATING_MESSAGE = "SMIGRATING"
+_SMIGRATED_MESSAGE = "SMIGRATED"
+
+_MAINTENANCE_MESSAGES = (
+    _MIGRATING_MESSAGE,
+    _MIGRATED_MESSAGE,
+    _FAILING_OVER_MESSAGE,
+    _FAILED_OVER_MESSAGE,
+    _SMIGRATING_MESSAGE,
+)
+
+MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING: dict[
+    str, tuple[type[MaintenanceNotification], Callable]
+] = {
+    _MIGRATING_MESSAGE: (
+        NodeMigratingNotification,
+        MaintenanceNotificationsParser.parse_maintenance_start_msg,
+    ),
+    _MIGRATED_MESSAGE: (
+        NodeMigratedNotification,
+        MaintenanceNotificationsParser.parse_maintenance_completed_msg,
+    ),
+    _FAILING_OVER_MESSAGE: (
+        NodeFailingOverNotification,
+        MaintenanceNotificationsParser.parse_maintenance_start_msg,
+    ),
+    _FAILED_OVER_MESSAGE: (
+        NodeFailedOverNotification,
+        MaintenanceNotificationsParser.parse_maintenance_completed_msg,
+    ),
+    _MOVING_MESSAGE: (
+        NodeMovingNotification,
+        MaintenanceNotificationsParser.parse_moving_msg,
+    ),
+    _SMIGRATING_MESSAGE: (
+        OSSNodeMigratingNotification,
+        MaintenanceNotificationsParser.parse_oss_maintenance_start_msg,
+    ),
+    _SMIGRATED_MESSAGE: (
+        OSSNodeMigratedNotification,
+        MaintenanceNotificationsParser.parse_oss_maintenance_completed_msg,
+    ),
+}
+
+
+class PushNotificationsParser(Protocol):
+    """Protocol defining RESP3-specific parsing functionality"""
+
+    pubsub_push_handler_func: Callable
+    invalidation_push_handler_func: Optional[Callable] = None
+    node_moving_push_handler_func: Optional[Callable] = None
+    maintenance_push_handler_func: Optional[Callable] = None
+    oss_cluster_maint_push_handler_func: Optional[Callable] = None
+
+    def handle_pubsub_push_response(self, response):
+        """Handle pubsub push responses"""
+        raise NotImplementedError()
+
+    def handle_push_response(self, response, **kwargs):
+        msg_type = response[0]
+        if isinstance(msg_type, bytes):
+            msg_type = msg_type.decode()
+
+        if msg_type not in (
+            _INVALIDATION_MESSAGE,
+            *_MAINTENANCE_MESSAGES,
+            _MOVING_MESSAGE,
+            _SMIGRATED_MESSAGE,
+        ):
+            return self.pubsub_push_handler_func(response)
+
+        try:
+            if (
+                msg_type == _INVALIDATION_MESSAGE
+                and self.invalidation_push_handler_func
+            ):
+                return self.invalidation_push_handler_func(response)
+
+            if msg_type == _MOVING_MESSAGE and self.node_moving_push_handler_func:
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+
+                notification = parser_function(response)
+                return self.node_moving_push_handler_func(notification)
+
+            if msg_type in _MAINTENANCE_MESSAGES and self.maintenance_push_handler_func:
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+                if msg_type == _SMIGRATING_MESSAGE:
+                    notification = parser_function(response)
+                else:
+                    notification_type = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                        msg_type
+                    ][0]
+                    notification = parser_function(response, notification_type)
+
+                if notification is not None:
+                    return self.maintenance_push_handler_func(notification)
+            if msg_type == _SMIGRATED_MESSAGE and (
+                self.oss_cluster_maint_push_handler_func
+                or self.maintenance_push_handler_func
+            ):
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+                notification = parser_function(response)
+
+                if notification is not None:
+                    if self.maintenance_push_handler_func:
+                        self.maintenance_push_handler_func(notification)
+                    if self.oss_cluster_maint_push_handler_func:
+                        self.oss_cluster_maint_push_handler_func(notification)
+        except Exception as e:
+            logger.error(
+                "Error handling {} message ({}): {}".format(msg_type, response, e)
+            )
+
+        return None
+
+    def set_pubsub_push_handler(self, pubsub_push_handler_func):
+        self.pubsub_push_handler_func = pubsub_push_handler_func
+
+    def set_invalidation_push_handler(self, invalidation_push_handler_func):
+        self.invalidation_push_handler_func = invalidation_push_handler_func
+
+    def set_node_moving_push_handler(self, node_moving_push_handler_func):
+        self.node_moving_push_handler_func = node_moving_push_handler_func
+
+    def set_maintenance_push_handler(self, maintenance_push_handler_func):
+        self.maintenance_push_handler_func = maintenance_push_handler_func
+
+    def set_oss_cluster_maint_push_handler(self, oss_cluster_maint_push_handler_func):
+        self.oss_cluster_maint_push_handler_func = oss_cluster_maint_push_handler_func
+
+
+class AsyncPushNotificationsParser(Protocol):
+    """Protocol defining async RESP3-specific parsing functionality"""
+
+    pubsub_push_handler_func: Callable
+    invalidation_push_handler_func: Optional[Callable] = None
+    node_moving_push_handler_func: Optional[Callable[..., Awaitable[None]]] = None
+    maintenance_push_handler_func: Optional[Callable[..., Awaitable[None]]] = None
+    oss_cluster_maint_push_handler_func: Optional[Callable[..., Awaitable[None]]] = None
+
+    async def handle_pubsub_push_response(self, response):
+        """Handle pubsub push responses asynchronously"""
+        raise NotImplementedError()
+
+    async def handle_push_response(self, response, **kwargs):
+        """Handle push responses asynchronously"""
+
+        msg_type = response[0]
+        if isinstance(msg_type, bytes):
+            msg_type = msg_type.decode()
+
+        if msg_type not in (
+            _INVALIDATION_MESSAGE,
+            *_MAINTENANCE_MESSAGES,
+            _MOVING_MESSAGE,
+            _SMIGRATED_MESSAGE,
+        ):
+            return await self.pubsub_push_handler_func(response)
+
+        try:
+            if (
+                msg_type == _INVALIDATION_MESSAGE
+                and self.invalidation_push_handler_func
+            ):
+                return await self.invalidation_push_handler_func(response)
+
+            if isinstance(msg_type, bytes):
+                msg_type = msg_type.decode()
+
+            if msg_type == _MOVING_MESSAGE and self.node_moving_push_handler_func:
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+                notification = parser_function(response)
+                return await self.node_moving_push_handler_func(notification)
+
+            if msg_type in _MAINTENANCE_MESSAGES and self.maintenance_push_handler_func:
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+                if msg_type == _SMIGRATING_MESSAGE:
+                    notification = parser_function(response)
+                else:
+                    notification_type = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                        msg_type
+                    ][0]
+                    notification = parser_function(response, notification_type)
+
+                if notification is not None:
+                    return await self.maintenance_push_handler_func(notification)
+            if (
+                msg_type == _SMIGRATED_MESSAGE
+                and self.oss_cluster_maint_push_handler_func
+            ):
+                parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
+                    msg_type
+                ][1]
+                notification = parser_function(response)
+                if notification is not None:
+                    return await self.oss_cluster_maint_push_handler_func(notification)
+        except Exception as e:
+            logger.error(
+                "Error handling {} message ({}): {}".format(msg_type, response, e)
+            )
+
+        return None
+
+    def set_pubsub_push_handler(self, pubsub_push_handler_func):
+        """Set the pubsub push handler function"""
+        self.pubsub_push_handler_func = pubsub_push_handler_func
+
+    def set_invalidation_push_handler(self, invalidation_push_handler_func):
+        """Set the invalidation push handler function"""
+        self.invalidation_push_handler_func = invalidation_push_handler_func
+
+    def set_node_moving_push_handler(self, node_moving_push_handler_func):
+        self.node_moving_push_handler_func = node_moving_push_handler_func
+
+    def set_maintenance_push_handler(self, maintenance_push_handler_func):
+        self.maintenance_push_handler_func = maintenance_push_handler_func
+
+    def set_oss_cluster_maint_push_handler(self, oss_cluster_maint_push_handler_func):
+        self.oss_cluster_maint_push_handler_func = oss_cluster_maint_push_handler_func
+
+
+class _AsyncRESPBase(AsyncBaseParser):
+    """Base class for async resp parsing"""
+
+    __slots__ = AsyncBaseParser.__slots__ + ("encoder", "_buffer", "_pos", "_chunks")
+
+    def __init__(self, socket_read_size: int):
+        super().__init__(socket_read_size)
+        self.encoder: Optional[Encoder] = None
+        self._buffer = b""
+        self._chunks = []
+        self._pos = 0
+
+    def _clear(self):
+        self._buffer = b""
+        self._chunks.clear()
+
+    def on_connect(self, connection):
+        """Called when the stream connects"""
+        self._stream = connection._reader
+        if self._stream is None:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+        self.encoder = connection.encoder
+        self._clear()
+        self._connected = True
+
+    def on_disconnect(self):
+        """Called when the stream disconnects"""
+        self._connected = False
+
+    async def can_read_destructive(self) -> bool:
+        if not self._connected:
+            raise OSError("Buffer is closed.")
+        if self._buffer:
+            return True
+        try:
+            async with async_timeout(0):
+                return self._stream.at_eof()
+        except TimeoutError:
+            return False
+
+    async def _read(self, length: int) -> bytes:
+        """
+        Read `length` bytes of data.  These are assumed to be followed
+        by a '\r\n' terminator which is subsequently discarded.
+        """
+        want = length + 2
+        end = self._pos + want
+        if len(self._buffer) >= end:
+            result = self._buffer[self._pos : end - 2]
+        else:
+            tail = self._buffer[self._pos :]
+            try:
+                data = await self._stream.readexactly(want - len(tail))
+            except IncompleteReadError as error:
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from error
+            result = (tail + data)[:-2]
+            self._chunks.append(data)
+        self._pos += want
+        return result
+
+    async def _readline(self) -> bytes:
+        """
+        read an unknown number of bytes up to the next '\r\n'
+        line separator, which is discarded.
+        """
+        found = self._buffer.find(b"\r\n", self._pos)
+        if found >= 0:
+            result = self._buffer[self._pos : found]
+        else:
+            tail = self._buffer[self._pos :]
+            data = await self._stream.readline()
+            if not data.endswith(b"\r\n"):
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+            result = (tail + data)[:-2]
+            self._chunks.append(data)
+        self._pos += len(result) + 2
+        return result

+ 692 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/commands.py

@@ -0,0 +1,692 @@
+from enum import Enum
+from typing import TYPE_CHECKING, Any, Awaitable, Dict, Optional, Tuple, Union
+
+from redis.exceptions import IncorrectPolicyType, RedisError, ResponseError
+from redis.utils import str_if_bytes
+
+if TYPE_CHECKING:
+    from redis.asyncio.cluster import ClusterNode
+
+
+class RequestPolicy(Enum):
+    ALL_NODES = "all_nodes"
+    ALL_SHARDS = "all_shards"
+    ALL_REPLICAS = "all_replicas"
+    MULTI_SHARD = "multi_shard"
+    SPECIAL = "special"
+    DEFAULT_KEYLESS = "default_keyless"
+    DEFAULT_KEYED = "default_keyed"
+    DEFAULT_NODE = "default_node"
+
+
+class ResponsePolicy(Enum):
+    ONE_SUCCEEDED = "one_succeeded"
+    ALL_SUCCEEDED = "all_succeeded"
+    AGG_LOGICAL_AND = "agg_logical_and"
+    AGG_LOGICAL_OR = "agg_logical_or"
+    AGG_MIN = "agg_min"
+    AGG_MAX = "agg_max"
+    AGG_SUM = "agg_sum"
+    SPECIAL = "special"
+    DEFAULT_KEYLESS = "default_keyless"
+    DEFAULT_KEYED = "default_keyed"
+
+
+class CommandPolicies:
+    def __init__(
+        self,
+        request_policy: RequestPolicy = RequestPolicy.DEFAULT_KEYLESS,
+        response_policy: ResponsePolicy = ResponsePolicy.DEFAULT_KEYLESS,
+    ):
+        self.request_policy = request_policy
+        self.response_policy = response_policy
+
+
+PolicyRecords = dict[str, dict[str, CommandPolicies]]
+
+
+class AbstractCommandsParser:
+    def _get_pubsub_keys(self, *args):
+        """
+        Get the keys from pubsub command.
+        Although PubSub commands have predetermined key locations, they are not
+        supported in the 'COMMAND's output, so the key positions are hardcoded
+        in this method
+        """
+        if len(args) < 2:
+            # The command has no keys in it
+            return None
+        args = [str_if_bytes(arg) for arg in args]
+        command = args[0].upper()
+        keys = None
+        if command == "PUBSUB":
+            # the second argument is a part of the command name, e.g.
+            # ['PUBSUB', 'NUMSUB', 'foo'].
+            pubsub_type = args[1].upper()
+            if pubsub_type in ["CHANNELS", "NUMSUB", "SHARDCHANNELS", "SHARDNUMSUB"]:
+                keys = args[2:]
+        elif command in ["SUBSCRIBE", "PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE"]:
+            # format example:
+            # SUBSCRIBE channel [channel ...]
+            keys = list(args[1:])
+        elif command in ["PUBLISH", "SPUBLISH"]:
+            # format example:
+            # PUBLISH channel message
+            keys = [args[1]]
+        return keys
+
+    def parse_subcommand(self, command, **options):
+        cmd_dict = {}
+        cmd_name = str_if_bytes(command[0])
+        cmd_dict["name"] = cmd_name
+        cmd_dict["arity"] = int(command[1])
+        cmd_dict["flags"] = [str_if_bytes(flag) for flag in command[2]]
+        cmd_dict["first_key_pos"] = command[3]
+        cmd_dict["last_key_pos"] = command[4]
+        cmd_dict["step_count"] = command[5]
+        if len(command) > 7:
+            cmd_dict["tips"] = command[7]
+            cmd_dict["key_specifications"] = command[8]
+            cmd_dict["subcommands"] = command[9]
+        return cmd_dict
+
+
+class CommandsParser(AbstractCommandsParser):
+    """
+    Parses Redis commands to get command keys.
+    COMMAND output is used to determine key locations.
+    Commands that do not have a predefined key location are flagged with
+    'movablekeys', and these commands' keys are determined by the command
+    'COMMAND GETKEYS'.
+    """
+
+    def __init__(self, redis_connection):
+        self.commands = {}
+        self.redis_connection = redis_connection
+        self.initialize(self.redis_connection)
+
+    def initialize(self, r):
+        commands = r.command()
+        uppercase_commands = []
+        for cmd in commands:
+            if any(x.isupper() for x in cmd):
+                uppercase_commands.append(cmd)
+        for cmd in uppercase_commands:
+            commands[cmd.lower()] = commands.pop(cmd)
+        self.commands = commands
+
+    # As soon as this PR is merged into Redis, we should reimplement
+    # our logic to use COMMAND INFO changes to determine the key positions
+    # https://github.com/redis/redis/pull/8324
+    def get_keys(self, redis_conn, *args):
+        """
+        Get the keys from the passed command.
+
+        NOTE: Due to a bug in redis<7.0, this function does not work properly
+        for EVAL or EVALSHA when the `numkeys` arg is 0.
+         - issue: https://github.com/redis/redis/issues/9493
+         - fix: https://github.com/redis/redis/pull/9733
+
+        So, don't use this function with EVAL or EVALSHA.
+        """
+        if len(args) < 2:
+            # The command has no keys in it
+            return None
+
+        cmd_name = args[0].lower()
+        if cmd_name not in self.commands:
+            # try to split the command name and to take only the main command,
+            # e.g. 'memory' for 'memory usage'
+            cmd_name_split = cmd_name.split()
+            cmd_name = cmd_name_split[0]
+            if cmd_name in self.commands:
+                # save the splitted command to args
+                args = cmd_name_split + list(args[1:])
+            else:
+                # We'll try to reinitialize the commands cache, if the engine
+                # version has changed, the commands may not be current
+                self.initialize(redis_conn)
+                if cmd_name not in self.commands:
+                    raise RedisError(
+                        f"{cmd_name.upper()} command doesn't exist in Redis commands"
+                    )
+
+        command = self.commands.get(cmd_name)
+        if "movablekeys" in command["flags"]:
+            keys = self._get_moveable_keys(redis_conn, *args)
+        elif "pubsub" in command["flags"] or command["name"] == "pubsub":
+            keys = self._get_pubsub_keys(*args)
+        else:
+            if (
+                command["step_count"] == 0
+                and command["first_key_pos"] == 0
+                and command["last_key_pos"] == 0
+            ):
+                is_subcmd = False
+                if "subcommands" in command:
+                    subcmd_name = f"{cmd_name}|{args[1].lower()}"
+                    for subcmd in command["subcommands"]:
+                        if str_if_bytes(subcmd[0]) == subcmd_name:
+                            command = self.parse_subcommand(subcmd)
+
+                            if command["first_key_pos"] > 0:
+                                is_subcmd = True
+
+                # The command doesn't have keys in it
+                if not is_subcmd:
+                    return None
+            last_key_pos = command["last_key_pos"]
+            if last_key_pos < 0:
+                last_key_pos = len(args) - abs(last_key_pos)
+            keys_pos = list(
+                range(command["first_key_pos"], last_key_pos + 1, command["step_count"])
+            )
+            keys = [args[pos] for pos in keys_pos]
+
+        return keys
+
+    def _get_moveable_keys(self, redis_conn, *args):
+        """
+        NOTE: Due to a bug in redis<7.0, this function does not work properly
+        for EVAL or EVALSHA when the `numkeys` arg is 0.
+         - issue: https://github.com/redis/redis/issues/9493
+         - fix: https://github.com/redis/redis/pull/9733
+
+        So, don't use this function with EVAL or EVALSHA.
+        """
+        # The command name should be splitted into separate arguments,
+        # e.g. 'MEMORY USAGE' will be splitted into ['MEMORY', 'USAGE']
+        pieces = args[0].split() + list(args[1:])
+        try:
+            keys = redis_conn.execute_command("COMMAND GETKEYS", *pieces)
+        except ResponseError as e:
+            message = e.__str__()
+            if (
+                "Invalid arguments" in message
+                or "The command has no key arguments" in message
+            ):
+                return None
+            else:
+                raise e
+        return keys
+
+    def _is_keyless_command(
+        self, command_name: str, subcommand_name: Optional[str] = None
+    ) -> bool:
+        """
+        Determines whether a given command or subcommand is considered "keyless".
+
+        A keyless command does not operate on specific keys, which is determined based
+        on the first key position in the command or subcommand details. If the command
+        or subcommand's first key position is zero or negative, it is treated as keyless.
+
+        Parameters:
+            command_name: str
+                The name of the command to check.
+            subcommand_name: Optional[str], default=None
+                The name of the subcommand to check, if applicable. If not provided,
+                the check is performed only on the command.
+
+        Returns:
+            bool
+                True if the specified command or subcommand is considered keyless,
+                False otherwise.
+
+        Raises:
+            ValueError
+                If the specified subcommand is not found within the command or the
+                specified command does not exist in the available commands.
+        """
+        if subcommand_name:
+            for subcommand in self.commands.get(command_name)["subcommands"]:
+                if str_if_bytes(subcommand[0]) == subcommand_name:
+                    parsed_subcmd = self.parse_subcommand(subcommand)
+                    return parsed_subcmd["first_key_pos"] <= 0
+            raise ValueError(
+                f"Subcommand {subcommand_name} not found in command {command_name}"
+            )
+        else:
+            command_details = self.commands.get(command_name, None)
+            if command_details is not None:
+                return command_details["first_key_pos"] <= 0
+
+            raise ValueError(f"Command {command_name} not found in commands")
+
+    def get_command_policies(self) -> PolicyRecords:
+        """
+        Retrieve and process the command policies for all commands and subcommands.
+
+        This method traverses through commands and subcommands, extracting policy details
+        from associated data structures and constructing a dictionary of commands with their
+        associated policies. It supports nested data structures and handles both main commands
+        and their subcommands.
+
+        Returns:
+            PolicyRecords: A collection of commands and subcommands associated with their
+            respective policies.
+
+        Raises:
+            IncorrectPolicyType: If an invalid policy type is encountered during policy extraction.
+        """
+        command_with_policies = {}
+
+        def extract_policies(data, module_name, command_name):
+            """
+            Recursively extract policies from nested data structures.
+
+            Args:
+                data: The data structure to search (can be list, dict, str, bytes, etc.)
+                command_name: The command name to associate with found policies
+            """
+            if isinstance(data, (str, bytes)):
+                # Decode bytes to string if needed
+                policy = str_if_bytes(data.decode())
+
+                # Check if this is a policy string
+                if policy.startswith("request_policy") or policy.startswith(
+                    "response_policy"
+                ):
+                    if policy.startswith("request_policy"):
+                        policy_type = policy.split(":")[1]
+
+                        try:
+                            command_with_policies[module_name][
+                                command_name
+                            ].request_policy = RequestPolicy(policy_type)
+                        except ValueError:
+                            raise IncorrectPolicyType(
+                                f"Incorrect request policy type: {policy_type}"
+                            )
+
+                    if policy.startswith("response_policy"):
+                        policy_type = policy.split(":")[1]
+
+                        try:
+                            command_with_policies[module_name][
+                                command_name
+                            ].response_policy = ResponsePolicy(policy_type)
+                        except ValueError:
+                            raise IncorrectPolicyType(
+                                f"Incorrect response policy type: {policy_type}"
+                            )
+
+            elif isinstance(data, list):
+                # For lists, recursively process each element
+                for item in data:
+                    extract_policies(item, module_name, command_name)
+
+            elif isinstance(data, dict):
+                # For dictionaries, recursively process each value
+                for value in data.values():
+                    extract_policies(value, module_name, command_name)
+
+        for command, details in self.commands.items():
+            # Check whether the command has keys
+            is_keyless = self._is_keyless_command(command)
+
+            if is_keyless:
+                default_request_policy = RequestPolicy.DEFAULT_KEYLESS
+                default_response_policy = ResponsePolicy.DEFAULT_KEYLESS
+            else:
+                default_request_policy = RequestPolicy.DEFAULT_KEYED
+                default_response_policy = ResponsePolicy.DEFAULT_KEYED
+
+            # Check if it's a core or module command
+            split_name = command.split(".")
+
+            if len(split_name) > 1:
+                module_name = split_name[0]
+                command_name = split_name[1]
+            else:
+                module_name = "core"
+                command_name = split_name[0]
+
+            # Create a CommandPolicies object with default policies on the new command.
+            if command_with_policies.get(module_name, None) is None:
+                command_with_policies[module_name] = {
+                    command_name: CommandPolicies(
+                        request_policy=default_request_policy,
+                        response_policy=default_response_policy,
+                    )
+                }
+            else:
+                command_with_policies[module_name][command_name] = CommandPolicies(
+                    request_policy=default_request_policy,
+                    response_policy=default_response_policy,
+                )
+
+            tips = details.get("tips")
+            subcommands = details.get("subcommands")
+
+            # Process tips for the main command
+            if tips:
+                extract_policies(tips, module_name, command_name)
+
+            # Process subcommands
+            if subcommands:
+                for subcommand_details in subcommands:
+                    # Get the subcommand name (first element)
+                    subcmd_name = subcommand_details[0]
+                    if isinstance(subcmd_name, bytes):
+                        subcmd_name = subcmd_name.decode()
+
+                    # Check whether the subcommand has keys
+                    is_keyless = self._is_keyless_command(command, subcmd_name)
+
+                    if is_keyless:
+                        default_request_policy = RequestPolicy.DEFAULT_KEYLESS
+                        default_response_policy = ResponsePolicy.DEFAULT_KEYLESS
+                    else:
+                        default_request_policy = RequestPolicy.DEFAULT_KEYED
+                        default_response_policy = ResponsePolicy.DEFAULT_KEYED
+
+                    subcmd_name = subcmd_name.replace("|", " ")
+
+                    # Create a CommandPolicies object with default policies on the new command.
+                    command_with_policies[module_name][subcmd_name] = CommandPolicies(
+                        request_policy=default_request_policy,
+                        response_policy=default_response_policy,
+                    )
+
+                    # Recursively extract policies from the rest of the subcommand details
+                    for subcommand_detail in subcommand_details[1:]:
+                        extract_policies(subcommand_detail, module_name, subcmd_name)
+
+        return command_with_policies
+
+
class AsyncCommandsParser(AbstractCommandsParser):
    """
    Parses Redis commands to get command keys.

    COMMAND output is used to determine key locations.
    Commands that do not have a predefined key location are flagged with 'movablekeys',
    and these commands' keys are determined by the command 'COMMAND GETKEYS'.

    NOTE: Due to a bug in redis<7.0, this does not work properly
    for EVAL or EVALSHA when the `numkeys` arg is 0.
     - issue: https://github.com/redis/redis/issues/9493
     - fix: https://github.com/redis/redis/pull/9733

    So, don't use this with EVAL or EVALSHA.
    """

    __slots__ = ("commands", "node")

    def __init__(self) -> None:
        self.commands: Dict[str, Union[int, Dict[str, Any]]] = {}

    async def initialize(self, node: Optional["ClusterNode"] = None) -> None:
        """Cache the COMMAND reply, optionally switching the node queried."""
        if node:
            self.node = node

        commands = await self.node.execute_command("COMMAND")
        self.commands = {cmd.lower(): command for cmd, command in commands.items()}

    # As soon as this PR is merged into Redis, we should reimplement
    # our logic to use COMMAND INFO changes to determine the key positions
    # https://github.com/redis/redis/pull/8324
    async def get_keys(self, *args: Any) -> Optional[Tuple[str, ...]]:
        """
        Get the keys from the passed command.

        NOTE: Due to a bug in redis<7.0, this function does not work properly
        for EVAL or EVALSHA when the `numkeys` arg is 0.
         - issue: https://github.com/redis/redis/issues/9493
         - fix: https://github.com/redis/redis/pull/9733

        So, don't use this function with EVAL or EVALSHA.
        """
        if len(args) < 2:
            # The command has no keys in it
            return None

        cmd_name = args[0].lower()
        if cmd_name not in self.commands:
            # try to split the command name and to take only the main command,
            # e.g. 'memory' for 'memory usage'
            cmd_name_split = cmd_name.split()
            cmd_name = cmd_name_split[0]
            if cmd_name in self.commands:
                # save the splitted command to args
                args = cmd_name_split + list(args[1:])
            else:
                # We'll try to reinitialize the commands cache, if the engine
                # version has changed, the commands may not be current
                await self.initialize()
                if cmd_name not in self.commands:
                    raise RedisError(
                        f"{cmd_name.upper()} command doesn't exist in Redis commands"
                    )

        command = self.commands.get(cmd_name)
        if "movablekeys" in command["flags"]:
            keys = await self._get_moveable_keys(*args)
        elif "pubsub" in command["flags"] or command["name"] == "pubsub":
            keys = self._get_pubsub_keys(*args)
        else:
            if (
                command["step_count"] == 0
                and command["first_key_pos"] == 0
                and command["last_key_pos"] == 0
            ):
                is_subcmd = False
                if "subcommands" in command:
                    subcmd_name = f"{cmd_name}|{args[1].lower()}"
                    for subcmd in command["subcommands"]:
                        if str_if_bytes(subcmd[0]) == subcmd_name:
                            command = self.parse_subcommand(subcmd)

                            if command["first_key_pos"] > 0:
                                is_subcmd = True

                # The command doesn't have keys in it
                if not is_subcmd:
                    return None
            last_key_pos = command["last_key_pos"]
            if last_key_pos < 0:
                # Negative positions count backwards from the end of args.
                last_key_pos = len(args) - abs(last_key_pos)
            keys_pos = list(
                range(command["first_key_pos"], last_key_pos + 1, command["step_count"])
            )
            keys = [args[pos] for pos in keys_pos]

        return keys

    async def _get_moveable_keys(self, *args: Any) -> Optional[Tuple[str, ...]]:
        """Resolve 'movablekeys' commands by asking the server (COMMAND GETKEYS)."""
        try:
            keys = await self.node.execute_command("COMMAND GETKEYS", *args)
        except ResponseError as e:
            message = e.__str__()
            if (
                "Invalid arguments" in message
                or "The command has no key arguments" in message
            ):
                return None
            else:
                raise e
        return keys

    async def _is_keyless_command(
        self, command_name: str, subcommand_name: Optional[str] = None
    ) -> bool:
        """
        Determines whether a given command or subcommand is considered "keyless".

        A keyless command does not operate on specific keys, which is determined
        based on the first key position in the command or subcommand details. If
        the command or subcommand's first key position is zero or negative, it is
        treated as keyless.

        Parameters:
            command_name: str
                The name of the command to check.
            subcommand_name: Optional[str], default=None
                The name of the subcommand to check, if applicable. If not
                provided, the check is performed only on the command.

        Returns:
            bool
                True if the specified command or subcommand is considered
                keyless, False otherwise.

        Raises:
            ValueError
                If the specified subcommand is not found within the command or
                the specified command does not exist in the available commands.
        """
        if subcommand_name:
            for subcommand in self.commands.get(command_name)["subcommands"]:
                if str_if_bytes(subcommand[0]) == subcommand_name:
                    parsed_subcmd = self.parse_subcommand(subcommand)
                    return parsed_subcmd["first_key_pos"] <= 0
            raise ValueError(
                f"Subcommand {subcommand_name} not found in command {command_name}"
            )
        else:
            command_details = self.commands.get(command_name, None)
            if command_details is not None:
                return command_details["first_key_pos"] <= 0

            raise ValueError(f"Command {command_name} not found in commands")

    # NOTE: the annotation is PolicyRecords, not Awaitable[PolicyRecords] —
    # an `async def` already wraps the declared return type in a coroutine.
    async def get_command_policies(self) -> PolicyRecords:
        """
        Retrieve and process the command policies for all commands and subcommands.

        Traverses the cached COMMAND reply, derives default policies from
        whether each (sub)command is keyless, then overrides the defaults with
        any request/response policy tips found in the command metadata.

        Returns:
            PolicyRecords: Commands and subcommands mapped to their policies,
            grouped by module name ("core" for built-in commands).

        Raises:
            IncorrectPolicyType: If an invalid policy type is encountered
                during policy extraction.
        """
        command_with_policies = {}

        def extract_policies(data, module_name, command_name):
            """
            Recursively extract policies from nested data structures.

            Args:
                data: The data structure to search (list, dict, str, bytes, ...)
                module_name: Module bucket the command belongs to.
                command_name: The command name to associate with found policies.
            """
            if isinstance(data, (str, bytes)):
                # str_if_bytes accepts both str and bytes; calling
                # data.decode() here would raise AttributeError on str input.
                policy = str_if_bytes(data)

                if policy.startswith("request_policy"):
                    policy_type = policy.split(":")[1]

                    try:
                        command_with_policies[module_name][
                            command_name
                        ].request_policy = RequestPolicy(policy_type)
                    except ValueError:
                        raise IncorrectPolicyType(
                            f"Incorrect request policy type: {policy_type}"
                        )

                if policy.startswith("response_policy"):
                    policy_type = policy.split(":")[1]

                    try:
                        command_with_policies[module_name][
                            command_name
                        ].response_policy = ResponsePolicy(policy_type)
                    except ValueError:
                        raise IncorrectPolicyType(
                            f"Incorrect response policy type: {policy_type}"
                        )

            elif isinstance(data, list):
                # For lists, recursively process each element
                for item in data:
                    extract_policies(item, module_name, command_name)

            elif isinstance(data, dict):
                # For dictionaries, recursively process each value
                for value in data.values():
                    extract_policies(value, module_name, command_name)

        for command, details in self.commands.items():
            # Keyless commands get different default policies.
            is_keyless = await self._is_keyless_command(command)

            if is_keyless:
                default_request_policy = RequestPolicy.DEFAULT_KEYLESS
                default_response_policy = ResponsePolicy.DEFAULT_KEYLESS
            else:
                default_request_policy = RequestPolicy.DEFAULT_KEYED
                default_response_policy = ResponsePolicy.DEFAULT_KEYED

            # Module commands are namespaced as "<module>.<command>".
            split_name = command.split(".")

            if len(split_name) > 1:
                module_name, command_name = split_name[0], split_name[1]
            else:
                module_name, command_name = "core", split_name[0]

            # Seed the command with its default policies; tips may override.
            command_with_policies.setdefault(module_name, {})[command_name] = (
                CommandPolicies(
                    request_policy=default_request_policy,
                    response_policy=default_response_policy,
                )
            )

            tips = details.get("tips")
            subcommands = details.get("subcommands")

            # Process tips for the main command
            if tips:
                extract_policies(tips, module_name, command_name)

            # Process subcommands
            if subcommands:
                for subcommand_details in subcommands:
                    # Get the subcommand name (first element)
                    subcmd_name = subcommand_details[0]
                    if isinstance(subcmd_name, bytes):
                        subcmd_name = subcmd_name.decode()

                    # Keyless subcommands get different default policies.
                    is_keyless = await self._is_keyless_command(command, subcmd_name)

                    if is_keyless:
                        default_request_policy = RequestPolicy.DEFAULT_KEYLESS
                        default_response_policy = ResponsePolicy.DEFAULT_KEYLESS
                    else:
                        default_request_policy = RequestPolicy.DEFAULT_KEYED
                        default_response_policy = ResponsePolicy.DEFAULT_KEYED

                    # "GET|SET"-style subcommand names become "GET SET".
                    subcmd_name = subcmd_name.replace("|", " ")

                    # Seed the subcommand with its default policies.
                    command_with_policies[module_name][subcmd_name] = CommandPolicies(
                        request_policy=default_request_policy,
                        response_policy=default_response_policy,
                    )

                    # Recursively extract policies from the rest of the subcommand details
                    for subcommand_detail in subcommand_details[1:]:
                        extract_policies(subcommand_detail, module_name, subcmd_name)

        return command_with_policies

+ 44 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/encoders.py

@@ -0,0 +1,44 @@
+from ..exceptions import DataError
+
+
class Encoder:
    """Encode values to bytes for the wire; decode replies back to str."""

    __slots__ = "encoding", "encoding_errors", "decode_responses"

    def __init__(self, encoding, encoding_errors, decode_responses):
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses

    def encode(self, value):
        "Return a bytestring or bytes-like representation of the value"
        # Reject bool explicitly: it is an int subclass and would otherwise
        # be serialized as b"True"/b"False".
        if isinstance(value, bool):
            raise DataError(
                "Invalid input of type: 'bool'. Convert to a "
                "bytes, string, int or float first."
            )
        if isinstance(value, (bytes, memoryview)):
            return value
        if isinstance(value, str):
            return value.encode(self.encoding, self.encoding_errors)
        if isinstance(value, (int, float)):
            # repr() preserves full float precision.
            return repr(value).encode()
        # A value we don't know how to deal with — reject it.
        typename = type(value).__name__
        raise DataError(
            f"Invalid input of type: '{typename}'. "
            f"Convert to a bytes, string, int or float first."
        )

    def decode(self, value, force=False):
        "Return a unicode string from the bytes-like representation"
        if not (self.decode_responses or force):
            return value
        if isinstance(value, memoryview):
            value = value.tobytes()
        if isinstance(value, bytes):
            return value.decode(self.encoding, self.encoding_errors)
        return value

+ 947 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/helpers.py

@@ -0,0 +1,947 @@
+import datetime
+
+from redis.utils import str_if_bytes
+
+
def timestamp_to_datetime(response):
    """Convert a unix-timestamp reply to a local datetime, or None if invalid."""
    if not response:
        return None
    try:
        seconds = int(response)
    except ValueError:
        # Not a numeric reply — treat as missing.
        return None
    return datetime.datetime.fromtimestamp(seconds)
+
+
def parse_debug_object(response):
    """Parse the output of Redis's DEBUG OBJECT command into a dict."""
    # The object's type leads the reply without a field name, so prepend
    # "type:" to make every token a "key:value" pair.
    raw = "type:" + str_if_bytes(response)
    parsed = dict(token.split(":") for token in raw.split())

    # Best-effort int coercion; DEBUG OBJECT output is unspecified, so
    # these fields may be absent depending on the server version.
    for field in ("refcount", "serializedlength", "lru", "lru_seconds_idle"):
        if field in parsed:
            parsed[field] = int(parsed[field])

    return parsed
+
+
def parse_info(response):
    """Parse the result of Redis's INFO command into a Python dict"""
    info = {}
    response = str_if_bytes(response)

    def get_value(value):
        # Coerce one raw INFO value: plain scalars become int/float when
        # possible, comma-separated lists become Python lists, and
        # "k=v,k=v" items become nested dicts.
        if "," not in value and "=" not in value:
            try:
                if "." in value:
                    return float(value)
                else:
                    return int(value)
            except ValueError:
                return value
        elif "=" not in value:
            # Comma list without '=' pairs; empty items are skipped.
            return [get_value(v) for v in value.split(",") if v]
        else:
            sub_dict = {}
            for item in value.split(","):
                if not item:
                    continue
                if "=" in item:
                    # rsplit: only the last '=' separates key from value.
                    k, v = item.rsplit("=", 1)
                    sub_dict[k] = get_value(v)
                else:
                    # Bare flag inside a csv value is recorded as True.
                    sub_dict[item] = True
            return sub_dict

    for line in response.splitlines():
        if line and not line.startswith("#"):
            if line.find(":") != -1:
                # Split, the info fields keys and values.
                # Note that the value may contain ':'. but the 'host:'
                # pseudo-command is the only case where the key contains ':'
                key, value = line.split(":", 1)
                if key == "cmdstat_host":
                    key, value = line.rsplit(":", 1)

                if key == "module":
                    # Hardcode a list for key 'modules' since there could be
                    # multiple lines that started with 'module'
                    info.setdefault("modules", []).append(get_value(value))
                else:
                    info[key] = get_value(value)
            else:
                # if the line isn't splittable, append it to the "__raw__" key
                info.setdefault("__raw__", []).append(line)

    return info
+
+
def parse_memory_stats(response, **kwargs):
    """Parse MEMORY STATS output, expanding the per-database sub-lists."""
    stats = pairs_to_dict(response, decode_keys=True, decode_string_values=True)
    # Each "db.<n>" entry is itself a flat key/value list; expand it too.
    for key in list(stats):
        value = stats[key]
        if key.startswith("db.") and isinstance(value, list):
            stats[key] = pairs_to_dict(
                value, decode_keys=True, decode_string_values=True
            )
    return stats
+
+
# Field-name -> coercion callable for SENTINEL state replies; fields not
# listed here are kept as strings by pairs_to_dict_typed().
SENTINEL_STATE_TYPES = {
    "can-failover-its-master": int,
    "config-epoch": int,
    "down-after-milliseconds": int,
    "failover-timeout": int,
    "info-refresh": int,
    "last-hello-message": int,
    "last-ok-ping-reply": int,
    "last-ping-reply": int,
    "last-ping-sent": int,
    "master-link-down-time": int,
    "master-port": int,
    "num-other-sentinels": int,
    "num-slaves": int,
    "o-down-time": int,
    "pending-commands": int,
    "parallel-syncs": int,
    "port": int,
    "quorum": int,
    "role-reported-time": int,
    "s-down-time": int,
    "slave-priority": int,
    "slave-repl-offset": int,
    "voted-leader-epoch": int,
}
+
+
def parse_sentinel_state(item):
    """Parse one sentinel state reply, adding derived is_* boolean fields."""
    state = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
    flags = set(state["flags"].split(","))
    # Surface each comma-separated flag as an explicit boolean entry.
    flag_fields = (
        ("is_master", "master"),
        ("is_slave", "slave"),
        ("is_sdown", "s_down"),
        ("is_odown", "o_down"),
        ("is_sentinel", "sentinel"),
        ("is_disconnected", "disconnected"),
        ("is_master_down", "master_down"),
    )
    for field, flag in flag_fields:
        state[field] = flag in flags
    return state
+
+
def parse_sentinel_master(response, **options):
    """Parse SENTINEL MASTER output into a state dict."""
    decoded = [str_if_bytes(item) for item in response]
    return parse_sentinel_state(decoded)
+
+
def parse_sentinel_state_resp3(response, **options):
    """Parse a RESP3 sentinel state map, coercing known fields to int."""
    result = {}
    for key in response:
        try:
            value = SENTINEL_STATE_TYPES[key](str_if_bytes(response[key]))
            result[str_if_bytes(key)] = value
        except Exception:
            # Unknown or uncoercible field: keep the raw value.
            # NOTE(review): this looks up response[str_if_bytes(key)] — if the
            # map keys are bytes this raises KeyError out of the handler;
            # presumably callers pass decoded (str) keys — confirm.
            result[str_if_bytes(key)] = response[str_if_bytes(key)]
    # Expose the flags as a set rather than a comma-separated string.
    flags = set(result["flags"].split(","))
    result["flags"] = flags
    return result
+
+
def parse_sentinel_masters(response, **options):
    """Parse SENTINEL MASTERS output into a dict keyed by master name."""
    states = (parse_sentinel_state(map(str_if_bytes, item)) for item in response)
    return {state["name"]: state for state in states}
+
+
def parse_sentinel_masters_resp3(response, **options):
    """Parse RESP3 SENTINEL MASTERS output into a list of state dicts."""
    result = []
    for master in response:
        result.append(parse_sentinel_state_resp3(master))
    return result
+
+
def parse_sentinel_slaves_and_sentinels(response, **options):
    """Parse SENTINEL SLAVES / SENTINELS output into a list of state dicts."""
    result = []
    for item in response:
        result.append(parse_sentinel_state(map(str_if_bytes, item)))
    return result
+
+
def parse_sentinel_slaves_and_sentinels_resp3(response, **options):
    """Parse RESP3 SENTINEL SLAVES / SENTINELS output into state dicts."""
    result = []
    for entry in response:
        result.append(parse_sentinel_state_resp3(entry, **options))
    return result
+
+
def parse_sentinel_get_master(response, **options):
    """Return (host, port) from SENTINEL GET-MASTER-ADDR-BY-NAME, or None."""
    if not response:
        return None
    host, port = response[0], response[1]
    return host, int(port)
+
+
def pairs_to_dict(response, decode_keys=False, decode_string_values=False):
    """Create a dict given a flat [k1, v1, k2, v2, ...] reply list."""
    if response is None:
        return {}
    if not (decode_keys or decode_string_values):
        # Fast path: pair up the flat list with a shared iterator.
        it = iter(response)
        return dict(zip(it, it))
    # Decoding path: slice out keys/values so each side can be mapped
    # through str_if_bytes independently.
    keys = response[::2]
    values = response[1::2]
    if decode_keys:
        keys = map(str_if_bytes, keys)
    if decode_string_values:
        values = map(str_if_bytes, values)
    return dict(zip(keys, values))
+
+
def pairs_to_dict_typed(response, type_info):
    """Create a dict from a flat pair list, coercing known keys' values."""
    result = {}
    pairs = iter(response)
    for key, value in zip(pairs, pairs):
        if key in type_info:
            try:
                value = type_info[key](value)
            except Exception:
                # Coercion failed — keep the raw string value instead.
                pass
        result[key] = value
    return result
+
+
def zset_score_pairs(response, **options):
    """Return (value, score) tuples when ``withscores`` was requested."""
    if not options.get("withscores") or not response:
        return response
    cast = options.get("score_cast_func", float)
    # The reply alternates member, score, member, score, ...
    it = iter(response)
    return [(member, cast(score)) for member, score in zip(it, it)]
+
+
def zset_score_for_rank(response, **options):
    """Return a [value, score] pair when ``withscore`` was requested."""
    if not response or not options.get("withscore"):
        return response
    cast = options.get("score_cast_func", float)
    value, raw_score = response[0], response[1]
    return [value, cast(raw_score)]
+
+
def zset_score_pairs_resp3(response, **options):
    """Return [value, score] pairs when ``withscores`` was requested."""
    if not response or not options.get("withscores"):
        return response
    cast = options.get("score_cast_func", float)
    result = []
    # RESP3 already pairs member and score per element.
    for name, raw_score in response:
        result.append([name, cast(raw_score)])
    return result
+
+
def zset_score_for_rank_resp3(response, **options):
    """Return a [value, score] pair when ``withscore`` was requested."""
    if not response or not options.get("withscore"):
        return response
    cast = options.get("score_cast_func", float)
    value, raw_score = response[0], response[1]
    return [value, cast(raw_score)]
+
+
def sort_return_tuples(response, **options):
    """Group a flat SORT reply into n-tuples when ``groups`` is set."""
    n = options.get("groups")
    if not response or not n:
        return response
    # Column i holds every n-th element starting at i; zip re-assembles rows.
    columns = [response[i::n] for i in range(n)]
    return list(zip(*columns))
+
+
def parse_stream_list(response, **options):
    """Convert stream entries into (id, fields_dict) tuples."""
    if response is None:
        return None
    claim = "claim_min_idle_time" in options
    entries = []
    for entry in response:
        if entry is None:
            # Deleted/missing entry placeholder.
            entries.append((None, None))
        elif claim:
            # XCLAIM with min-idle-time may carry extra trailing fields.
            entries.append((entry[0], pairs_to_dict(entry[1]), *entry[2:]))
        else:
            entries.append((entry[0], pairs_to_dict(entry[1])))
    return entries
+
+
def pairs_to_dict_with_str_keys(response):
    """Build a dict from a flat pair list, decoding the keys to str."""
    return pairs_to_dict(response, decode_keys=True)
+
+
def parse_list_of_dicts(response):
    """Convert a list of flat pair lists into a list of str-keyed dicts."""
    return [pairs_to_dict_with_str_keys(item) for item in response]
+
+
def parse_xclaim(response, **options):
    """Parse XCLAIM output; JUSTID replies pass through unchanged."""
    justid = options.get("parse_justid", False)
    return response if justid else parse_stream_list(response)
+
+
def parse_xautoclaim(response, **options):
    """Parse XAUTOCLAIM output; JUSTID replies return only the claimed IDs."""
    if options.get("parse_justid", False):
        return response[1]
    # Mutates the reply in place, replacing raw entries with parsed tuples.
    response[1] = parse_stream_list(response[1])
    return response
+
+
def parse_xinfo_stream(response, **options):
    """Parse XINFO STREAM (optionally FULL) output into a dict."""
    if isinstance(response, list):
        # RESP2: flat key/value list.
        data = pairs_to_dict(response, decode_keys=True)
    else:
        # RESP3: already a map; only the keys need decoding.
        data = {str_if_bytes(k): v for k, v in response.items()}
    if not options.get("full", False):
        first = data.get("first-entry")
        if first is not None and first[0] is not None:
            data["first-entry"] = (first[0], pairs_to_dict(first[1]))
        # NOTE(review): "last-entry" is indexed directly while "first-entry"
        # uses .get() — presumably both keys are always present; confirm.
        last = data["last-entry"]
        if last is not None and last[0] is not None:
            data["last-entry"] = (last[0], pairs_to_dict(last[1]))
    else:
        # FULL replies inline every entry plus consumer-group details.
        data["entries"] = {_id: pairs_to_dict(entry) for _id, entry in data["entries"]}
        if len(data["groups"]) > 0 and isinstance(data["groups"][0], list):
            # RESP2 groups: flat pair lists, including nested consumers.
            data["groups"] = [
                pairs_to_dict(group, decode_keys=True) for group in data["groups"]
            ]
            for g in data["groups"]:
                if g["consumers"] and g["consumers"][0] is not None:
                    g["consumers"] = [
                        pairs_to_dict(c, decode_keys=True) for c in g["consumers"]
                    ]
        else:
            # RESP3 groups: maps whose keys only need decoding.
            data["groups"] = [
                {str_if_bytes(k): v for k, v in group.items()}
                for group in data["groups"]
            ]
    return data
+
+
def parse_xread(response, **options):
    """Parse XREAD output into [stream_name, entries] pairs."""
    if response is None:
        return []
    result = []
    for stream in response:
        result.append([stream[0], parse_stream_list(stream[1], **options)])
    return result
+
+
def parse_xread_resp3(response, **options):
    """Parse RESP3 XREAD output into a dict of stream -> [entries]."""
    if response is None:
        return {}
    result = {}
    for stream_name, entries in response.items():
        result[stream_name] = [parse_stream_list(entries, **options)]
    return result
+
+
def parse_xpending(response, **options):
    """Parse XPENDING summary output (or detail rows with ``parse_detail``)."""
    if options.get("parse_detail", False):
        return parse_xpending_range(response)
    # Summary form: [count, min-id, max-id, [[consumer, pending], ...]].
    consumers = [
        {"name": name, "pending": int(count)} for name, count in response[3] or []
    ]
    return {
        "pending": response[0],
        "min": response[1],
        "max": response[2],
        "consumers": consumers,
    }
+
+
def parse_xpending_range(response):
    """Convert XPENDING detail rows into dicts with named fields."""
    field_names = ("message_id", "consumer", "time_since_delivered", "times_delivered")
    result = []
    for row in response:
        result.append(dict(zip(field_names, row)))
    return result
+
+
+def float_or_none(response):
+    """Cast the reply to float, passing None through unchanged."""
+    if response is None:
+        return None
+    return float(response)
+
+
+def bool_ok(response, **options):
+    """Return True when the server replied with the simple string +OK."""
+    return str_if_bytes(response) == "OK"
+
+
+def parse_zadd(response, **options):
+    """Parse ZADD: float when the INCR variant ('as_score') was used, int
+    otherwise; a nil reply stays None."""
+    if response is None:
+        return None
+    if options.get("as_score"):
+        return float(response)
+    return int(response)
+
+
+def parse_client_list(response, **options):
+    """Parse CLIENT LIST output into a list of dicts, one per client line."""
+    clients = []
+    for c in str_if_bytes(response).splitlines():
+        client_dict = {}
+        tokens = c.split(" ")
+        last_key = None
+        for token in tokens:
+            if "=" in token:
+                # Values might contain '='
+                key, value = token.split("=", 1)
+                client_dict[key] = value
+                last_key = key
+            else:
+                # Values may include spaces. For instance, when running Redis via a Unix socket — such as
+                # "/tmp/redis sock/redis.sock" — the addr or laddr field will include a space.
+                # NOTE(review): assumes the first token of every line contains
+                # '=' (so last_key is set before this branch runs) — holds for
+                # server-generated CLIENT LIST lines; confirm for edge inputs.
+                client_dict[last_key] += " " + token
+
+        if client_dict:
+            clients.append(client_dict)
+    return clients
+
+
+def parse_config_get(response, **options):
+    """Parse CONFIG GET into a dict of setting -> value (empty dict when no match)."""
+    response = [str_if_bytes(i) if i is not None else None for i in response]
+    return response and pairs_to_dict(response) or {}
+
+
+def parse_scan(response, **options):
+    """Parse SCAN/SSCAN into (int cursor, results)."""
+    cursor, r = response
+    return int(cursor), r
+
+
+def parse_hscan(response, **options):
+    """Parse HSCAN into (int cursor, payload): a flat list when the NOVALUES
+    option was used, a field -> value dict otherwise."""
+    cursor, r = response
+    no_values = options.get("no_values", False)
+    if no_values:
+        payload = r or []
+    else:
+        payload = r and pairs_to_dict(r) or {}
+    return int(cursor), payload
+
+
+def parse_zscan(response, **options):
+    """Parse ZSCAN into (int cursor, [(member, score), ...]); scores go through
+    the optional 'score_cast_func' (default float)."""
+    score_cast_func = options.get("score_cast_func", float)
+    cursor, r = response
+    # the flat reply alternates member, score — pair them up via one iterator
+    it = iter(r)
+    return int(cursor), list(zip(it, map(score_cast_func, it)))
+
+
+def parse_zmscore(response, **options):
+    """Parse ZMSCORE into floats, keeping None for missing members."""
+    # zmscore: list of scores (double precision floating point number) or nil
+    return [float(score) if score is not None else None for score in response]
+
+
+def parse_slowlog_get(response, **options):
+    """Parse SLOWLOG GET into a list of dicts; tolerates the extra complexity
+    field injected by Redis Enterprise and the optional client fields."""
+    space = " " if options.get("decode_responses", False) else b" "
+
+    def parse_item(item):
+        result = {"id": item[0], "start_time": int(item[1]), "duration": int(item[2])}
+        # Redis Enterprise injects another entry at index [3], which has
+        # the complexity info (i.e. the value N in case the command has
+        # an O(N) complexity) instead of the command.
+        if isinstance(item[3], list):
+            result["command"] = space.join(item[3])
+
+            # These fields are optional, depends on environment.
+            if len(item) >= 6:
+                result["client_address"] = item[4]
+                result["client_name"] = item[5]
+        else:
+            result["complexity"] = item[3]
+            result["command"] = space.join(item[4])
+
+            # These fields are optional, depends on environment.
+            if len(item) >= 7:
+                result["client_address"] = item[5]
+                result["client_name"] = item[6]
+
+        return result
+
+    return [parse_item(item) for item in response]
+
+
+def parse_stralgo(response, **options):
+    """
+    Parse the response from `STRALGO` command.
+    Without modifiers the returned value is string.
+    When LEN is given the command returns the length of the result
+    (i.e integer).
+    When IDX is given the command returns a dictionary with the LCS
+    length and all the ranges in both the strings, start and end
+    offset for each string, where there are matches.
+    When WITHMATCHLEN is given, each array representing a match will
+    also have the length of the match at the beginning of the array.
+    """
+    if options.get("len", False):
+        return int(response)
+    if options.get("idx", False):
+        # IDX reply layout: [key, matches, key, length]
+        if options.get("withmatchlen", False):
+            # move the trailing match length to the front of each match entry
+            matches = [
+                [(int(match[-1]))] + list(map(tuple, match[:-1]))
+                for match in response[1]
+            ]
+        else:
+            matches = [list(map(tuple, match)) for match in response[1]]
+        return {
+            str_if_bytes(response[0]): matches,
+            str_if_bytes(response[2]): int(response[3]),
+        }
+    return str_if_bytes(response)
+
+
+def parse_cluster_info(response, **options):
+    """Parse CLUSTER INFO ("key:value" lines) into a dict."""
+    response = str_if_bytes(response)
+    return dict(line.split(":") for line in response.splitlines() if line)
+
+
+def _parse_node_line(line):
+    """Parse one CLUSTER NODES line into (address, node_dict)."""
+    line_items = line.split(" ")
+    # NOTE(review): line is split twice; line_items[:8] would yield the same
+    # eight fields.
+    node_id, addr, flags, master_id, ping, pong, epoch, connected = line.split(" ")[:8]
+    # addr is "ip:port@cport[,hostname]"; keep the part before '@' as the key
+    ip = addr.split("@")[0]
+    hostname = addr.split("@")[1].split(",")[1] if "@" in addr and "," in addr else ""
+    node_dict = {
+        "node_id": node_id,
+        "hostname": hostname,
+        "flags": flags,
+        "master_id": master_id,
+        "last_ping_sent": ping,
+        "last_pong_rcvd": pong,
+        "epoch": epoch,
+        "slots": [],
+        "migrations": [],
+        "connected": True if connected == "connected" else False,
+    }
+    # any fields after the first eight are slot ranges / migration markers
+    if len(line_items) >= 9:
+        slots, migrations = _parse_slots(line_items[8:])
+        node_dict["slots"], node_dict["migrations"] = slots, migrations
+    return ip, node_dict
+
+
+def _parse_slots(slot_ranges):
+    """Split CLUSTER NODES slot fields into plain slot ranges and in-flight
+    migrations ("[slot->-node]" = migrating, "[slot-<-node]" = importing)."""
+    slots, migrations = [], []
+    for s_range in slot_ranges:
+        if "->-" in s_range:
+            # strip the surrounding brackets before splitting on the marker
+            slot_id, dst_node_id = s_range[1:-1].split("->-", 1)
+            migrations.append(
+                {"slot": slot_id, "node_id": dst_node_id, "state": "migrating"}
+            )
+        elif "-<-" in s_range:
+            slot_id, src_node_id = s_range[1:-1].split("-<-", 1)
+            migrations.append(
+                {"slot": slot_id, "node_id": src_node_id, "state": "importing"}
+            )
+        else:
+            # "start-end" range (or a single slot) -> list of its bounds
+            s_range = [sl for sl in s_range.split("-")]
+            slots.append(s_range)
+
+    return slots, migrations
+
+
+def parse_cluster_nodes(response, **options):
+    """
+    Parse CLUSTER NODES / CLUSTER REPLICAS output into {address: node_dict}.
+
+    @see: https://redis.io/commands/cluster-nodes  # string / bytes
+    @see: https://redis.io/commands/cluster-replicas # list of string / bytes
+    """
+    if isinstance(response, (str, bytes)):
+        response = response.splitlines()
+    return dict(_parse_node_line(str_if_bytes(node)) for node in response)
+
+
+def parse_geosearch_generic(response, **options):
+    """
+    Parse the response of 'GEOSEARCH', GEORADIUS' and 'GEORADIUSBYMEMBER'
+    commands according to 'withdist', 'withhash' and 'withcoord' labels.
+    """
+    try:
+        if options["store"] or options["store_dist"]:
+            # `store` and `store_dist` cant be combined
+            # with other command arguments.
+            # relevant to 'GEORADIUS' and 'GEORADIUSBYMEMBER'
+            return response
+    except KeyError:  # it means the command was sent via execute_command
+        return response
+
+    # normalise a bare (non-list) reply into a one-element list
+    if not isinstance(response, list):
+        response_list = [response]
+    else:
+        response_list = response
+
+    if not options["withdist"] and not options["withcoord"] and not options["withhash"]:
+        # just a bunch of places
+        return response_list
+
+    cast = {
+        "withdist": float,
+        "withcoord": lambda ll: (float(ll[0]), float(ll[1])),
+        "withhash": int,
+    }
+
+    # zip all output results with each casting function to get
+    # the properly native Python value.
+    f = [lambda x: x]
+    f += [cast[o] for o in ["withdist", "withhash", "withcoord"] if options[o]]
+    return [list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list]
+
+
+def parse_command(response, **options):
+    """Parse COMMAND (RESP2) into {command name: metadata dict}."""
+    commands = {}
+    for command in response:
+        cmd_dict = {}
+        cmd_name = str_if_bytes(command[0])
+        cmd_dict["name"] = cmd_name
+        cmd_dict["arity"] = int(command[1])
+        cmd_dict["flags"] = [str_if_bytes(flag) for flag in command[2]]
+        cmd_dict["first_key_pos"] = command[3]
+        cmd_dict["last_key_pos"] = command[4]
+        cmd_dict["step_count"] = command[5]
+        # newer servers append tips, key specs and subcommands
+        if len(command) > 7:
+            cmd_dict["tips"] = command[7]
+            cmd_dict["key_specifications"] = command[8]
+            cmd_dict["subcommands"] = command[9]
+        commands[cmd_name] = cmd_dict
+    return commands
+
+
+def parse_command_resp3(response, **options):
+    """Parse COMMAND (RESP3) into {command name: metadata dict}; unlike the
+    RESP2 variant, flags become a set and acl_categories are included."""
+    commands = {}
+    for command in response:
+        cmd_dict = {}
+        cmd_name = str_if_bytes(command[0])
+        cmd_dict["name"] = cmd_name
+        cmd_dict["arity"] = command[1]
+        cmd_dict["flags"] = {str_if_bytes(flag) for flag in command[2]}
+        cmd_dict["first_key_pos"] = command[3]
+        cmd_dict["last_key_pos"] = command[4]
+        cmd_dict["step_count"] = command[5]
+        cmd_dict["acl_categories"] = command[6]
+        if len(command) > 7:
+            cmd_dict["tips"] = command[7]
+            cmd_dict["key_specifications"] = command[8]
+            cmd_dict["subcommands"] = command[9]
+
+        commands[cmd_name] = cmd_dict
+    return commands
+
+
+def parse_pubsub_numsub(response, **options):
+    """Pair up the flat [channel, count, ...] reply into (channel, count) tuples."""
+    return list(zip(response[0::2], response[1::2]))
+
+
+def parse_client_kill(response, **options):
+    """Parse CLIENT KILL: an int (number of clients killed) for the filter
+    form, or True/False for the legacy addr form replying +OK."""
+    if isinstance(response, int):
+        return response
+    return str_if_bytes(response) == "OK"
+
+
+def parse_acl_getuser(response, **options):
+    """Parse ACL GETUSER (RESP2 list or RESP3 map) into a normalised dict."""
+    if response is None:
+        return None
+    if isinstance(response, list):
+        data = pairs_to_dict(response, decode_keys=True)
+    else:
+        data = {str_if_bytes(key): value for key, value in response.items()}
+
+    # convert everything but user-defined data in 'keys' to native strings
+    data["flags"] = list(map(str_if_bytes, data["flags"]))
+    data["passwords"] = list(map(str_if_bytes, data["passwords"]))
+    data["commands"] = str_if_bytes(data["commands"])
+    if isinstance(data["keys"], str) or isinstance(data["keys"], bytes):
+        data["keys"] = list(str_if_bytes(data["keys"]).split(" "))
+    if data["keys"] == [""]:
+        data["keys"] = []
+    if "channels" in data:
+        if isinstance(data["channels"], str) or isinstance(data["channels"], bytes):
+            data["channels"] = list(str_if_bytes(data["channels"]).split(" "))
+        if data["channels"] == [""]:
+            data["channels"] = []
+    if "selectors" in data:
+        # selectors arrive either as flat lists (RESP2) or dicts (RESP3)
+        if data["selectors"] != [] and isinstance(data["selectors"][0], list):
+            data["selectors"] = [
+                list(map(str_if_bytes, selector)) for selector in data["selectors"]
+            ]
+        elif data["selectors"] != []:
+            data["selectors"] = [
+                {str_if_bytes(k): str_if_bytes(v) for k, v in selector.items()}
+                for selector in data["selectors"]
+            ]
+
+    # split 'commands' into separate 'categories' and 'commands' lists
+    commands, categories = [], []
+    for command in data["commands"].split(" "):
+        categories.append(command) if "@" in command else commands.append(command)
+
+    data["commands"] = commands
+    data["categories"] = categories
+    data["enabled"] = "on" in data["flags"]
+    return data
+
+
+def parse_acl_log(response, **options):
+    """Parse ACL LOG: a list of entry dicts, or a bool for ACL LOG RESET."""
+    if response is None:
+        return None
+    if isinstance(response, list):
+        data = []
+        for log in response:
+            log_data = pairs_to_dict(log, True, True)
+            client_info = log_data.get("client-info", "")
+            log_data["client-info"] = parse_client_info(client_info)
+
+            # float() is lossy comparing to the "double" in C
+            log_data["age-seconds"] = float(log_data["age-seconds"])
+            data.append(log_data)
+    else:
+        data = bool_ok(response)
+    return data
+
+
+def parse_client_info(value):
+    """
+    Parsing client-info in ACL Log in following format.
+    "key1=value1 key2=value2 key3=value3"
+    """
+    client_info = {}
+    for info in str_if_bytes(value).strip().split():
+        # NOTE(review): split("=") raises on tokens with more than one '=' —
+        # assumes values here never contain '='; confirm against server output.
+        key, value = info.split("=")
+        client_info[key] = value
+
+    # Those fields are defined as int in networking.c
+    for int_key in {
+        "id",
+        "age",
+        "idle",
+        "db",
+        "sub",
+        "psub",
+        "multi",
+        "qbuf",
+        "qbuf-free",
+        "obl",
+        "argv-mem",
+        "oll",
+        "omem",
+        "tot-mem",
+    }:
+        if int_key in client_info:
+            client_info[int_key] = int(client_info[int_key])
+    return client_info
+
+
+def parse_set_result(response, **options):
+    """
+    Handle SET result since GET argument is available since Redis 6.2.
+    Parsing SET result into:
+    - BOOL
+    - String when GET argument is used
+    """
+    if options.get("get"):
+        # Redis will return a getCommand result.
+        # See `setGenericCommand` in t_string.c
+        return response
+    # non-GET path: True for +OK, or the falsy reply itself (e.g. None)
+    return response and str_if_bytes(response) == "OK"
+
+
+def string_keys_to_dict(key_string, callback):
+    """Map every space-separated command name in key_string to callback."""
+    return dict.fromkeys(key_string.split(), callback)
+
+
+# Response callbacks applied for all protocol versions; protocol-specific
+# overrides live in _RedisCallbacksRESP2 / _RedisCallbacksRESP3.
+_RedisCallbacks = {
+    **string_keys_to_dict(
+        "AUTH COPY EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST PSETEX "
+        "PEXPIRE PEXPIREAT RENAMENX SETEX SETNX SMOVE",
+        bool,
+    ),
+    **string_keys_to_dict("HINCRBYFLOAT INCRBYFLOAT", float),
+    **string_keys_to_dict(
+        "ASKING FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE "
+        "RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH",
+        bool_ok,
+    ),
+    **string_keys_to_dict("XREAD XREADGROUP", parse_xread),
+    **string_keys_to_dict(
+        "GEORADIUS GEORADIUSBYMEMBER GEOSEARCH",
+        parse_geosearch_generic,
+    ),
+    **string_keys_to_dict("XRANGE XREVRANGE", parse_stream_list),
+    "ACL GETUSER": parse_acl_getuser,
+    "ACL LOAD": bool_ok,
+    "ACL LOG": parse_acl_log,
+    "ACL SETUSER": bool_ok,
+    "ACL SAVE": bool_ok,
+    "CLIENT INFO": parse_client_info,
+    "CLIENT KILL": parse_client_kill,
+    "CLIENT LIST": parse_client_list,
+    "CLIENT PAUSE": bool_ok,
+    "CLIENT SETINFO": bool_ok,
+    "CLIENT SETNAME": bool_ok,
+    "CLIENT UNBLOCK": bool,
+    "CLUSTER ADDSLOTS": bool_ok,
+    "CLUSTER ADDSLOTSRANGE": bool_ok,
+    "CLUSTER DELSLOTS": bool_ok,
+    "CLUSTER DELSLOTSRANGE": bool_ok,
+    "CLUSTER FAILOVER": bool_ok,
+    "CLUSTER FORGET": bool_ok,
+    "CLUSTER INFO": parse_cluster_info,
+    "CLUSTER MEET": bool_ok,
+    "CLUSTER NODES": parse_cluster_nodes,
+    "CLUSTER REPLICAS": parse_cluster_nodes,
+    "CLUSTER REPLICATE": bool_ok,
+    "CLUSTER RESET": bool_ok,
+    "CLUSTER SAVECONFIG": bool_ok,
+    "CLUSTER SET-CONFIG-EPOCH": bool_ok,
+    "CLUSTER SETSLOT": bool_ok,
+    "CLUSTER SLAVES": parse_cluster_nodes,
+    "COMMAND": parse_command,
+    "CONFIG RESETSTAT": bool_ok,
+    "CONFIG SET": bool_ok,
+    "FUNCTION DELETE": bool_ok,
+    "FUNCTION FLUSH": bool_ok,
+    "FUNCTION RESTORE": bool_ok,
+    "GEODIST": float_or_none,
+    "HSCAN": parse_hscan,
+    "INFO": parse_info,
+    "LASTSAVE": timestamp_to_datetime,
+    "MEMORY PURGE": bool_ok,
+    "MODULE LOAD": bool,
+    "MODULE UNLOAD": bool,
+    "PING": lambda r: str_if_bytes(r) == "PONG",
+    "PUBSUB NUMSUB": parse_pubsub_numsub,
+    "PUBSUB SHARDNUMSUB": parse_pubsub_numsub,
+    "QUIT": bool_ok,
+    "SET": parse_set_result,
+    "SCAN": parse_scan,
+    "SCRIPT EXISTS": lambda r: list(map(bool, r)),
+    "SCRIPT FLUSH": bool_ok,
+    "SCRIPT KILL": bool_ok,
+    "SCRIPT LOAD": str_if_bytes,
+    "SENTINEL CKQUORUM": bool_ok,
+    "SENTINEL FAILOVER": bool_ok,
+    "SENTINEL FLUSHCONFIG": bool_ok,
+    "SENTINEL GET-MASTER-ADDR-BY-NAME": parse_sentinel_get_master,
+    "SENTINEL MONITOR": bool_ok,
+    "SENTINEL RESET": bool_ok,
+    "SENTINEL REMOVE": bool_ok,
+    "SENTINEL SET": bool_ok,
+    "SLOWLOG GET": parse_slowlog_get,
+    "SLOWLOG RESET": bool_ok,
+    "SORT": sort_return_tuples,
+    "SSCAN": parse_scan,
+    "TIME": lambda x: (int(x[0]), int(x[1])),
+    "XAUTOCLAIM": parse_xautoclaim,
+    "XCLAIM": parse_xclaim,
+    "XGROUP CREATE": bool_ok,
+    "XGROUP DESTROY": bool,
+    "XGROUP SETID": bool_ok,
+    "XINFO STREAM": parse_xinfo_stream,
+    "XPENDING": parse_xpending,
+    "ZSCAN": parse_zscan,
+}
+
+
+# Response callbacks used only on RESP2 connections.
+_RedisCallbacksRESP2 = {
+    **string_keys_to_dict(
+        "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
+    ),
+    **string_keys_to_dict(
+        "ZDIFF ZINTER ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE "
+        "ZREVRANGEBYSCORE ZUNION",
+        zset_score_pairs,
+    ),
+    **string_keys_to_dict(
+        "ZREVRANK ZRANK",
+        zset_score_for_rank,
+    ),
+    **string_keys_to_dict("ZINCRBY ZSCORE", float_or_none),
+    **string_keys_to_dict("BGREWRITEAOF BGSAVE", lambda r: True),
+    **string_keys_to_dict("BLPOP BRPOP", lambda r: r and tuple(r) or None),
+    **string_keys_to_dict(
+        "BZPOPMAX BZPOPMIN", lambda r: r and (r[0], r[1], float(r[2])) or None
+    ),
+    "ACL CAT": lambda r: list(map(str_if_bytes, r)),
+    "ACL GENPASS": str_if_bytes,
+    "ACL HELP": lambda r: list(map(str_if_bytes, r)),
+    "ACL LIST": lambda r: list(map(str_if_bytes, r)),
+    "ACL USERS": lambda r: list(map(str_if_bytes, r)),
+    "ACL WHOAMI": str_if_bytes,
+    "CLIENT GETNAME": str_if_bytes,
+    "CLIENT TRACKINGINFO": lambda r: list(map(str_if_bytes, r)),
+    "CLUSTER GETKEYSINSLOT": lambda r: list(map(str_if_bytes, r)),
+    "COMMAND GETKEYS": lambda r: list(map(str_if_bytes, r)),
+    "CONFIG GET": parse_config_get,
+    "DEBUG OBJECT": parse_debug_object,
+    "GEOHASH": lambda r: list(map(str_if_bytes, r)),
+    "GEOPOS": lambda r: list(
+        map(lambda ll: (float(ll[0]), float(ll[1])) if ll is not None else None, r)
+    ),
+    "HGETALL": lambda r: r and pairs_to_dict(r) or {},
+    "HOTKEYS GET": lambda r: [pairs_to_dict(m) for m in r],
+    "MEMORY STATS": parse_memory_stats,
+    "MODULE LIST": lambda r: [pairs_to_dict(m) for m in r],
+    "RESET": str_if_bytes,
+    "SENTINEL MASTER": parse_sentinel_master,
+    "SENTINEL MASTERS": parse_sentinel_masters,
+    "SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels,
+    "SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels,
+    "STRALGO": parse_stralgo,
+    "XINFO CONSUMERS": parse_list_of_dicts,
+    "XINFO GROUPS": parse_list_of_dicts,
+    "ZADD": parse_zadd,
+    "ZMSCORE": parse_zmscore,
+}
+
+
+# Response callbacks used only on RESP3 connections.
+_RedisCallbacksRESP3 = {
+    **string_keys_to_dict(
+        "SDIFF SINTER SMEMBERS SUNION", lambda r: r and set(r) or set()
+    ),
+    **string_keys_to_dict(
+        "ZRANGE ZINTER ZPOPMAX ZPOPMIN HGETALL XREADGROUP",
+        lambda r, **kwargs: r,
+    ),
+    **string_keys_to_dict(
+        "ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE ZUNION",
+        zset_score_pairs_resp3,
+    ),
+    **string_keys_to_dict(
+        "ZREVRANK ZRANK",
+        zset_score_for_rank_resp3,
+    ),
+    **string_keys_to_dict("XREAD XREADGROUP", parse_xread_resp3),
+    "ACL LOG": lambda r: (
+        [
+            {str_if_bytes(key): str_if_bytes(value) for key, value in x.items()}
+            for x in r
+        ]
+        if isinstance(r, list)
+        else bool_ok(r)
+    ),
+    "COMMAND": parse_command_resp3,
+    "CONFIG GET": lambda r: {
+        str_if_bytes(key) if key is not None else None: (
+            str_if_bytes(value) if value is not None else None
+        )
+        for key, value in r.items()
+    },
+    "MEMORY STATS": lambda r: {str_if_bytes(key): value for key, value in r.items()},
+    "SENTINEL MASTER": parse_sentinel_state_resp3,
+    "SENTINEL MASTERS": parse_sentinel_masters_resp3,
+    "SENTINEL SENTINELS": parse_sentinel_slaves_and_sentinels_resp3,
+    "SENTINEL SLAVES": parse_sentinel_slaves_and_sentinels_resp3,
+    "STRALGO": lambda r, **options: (
+        {str_if_bytes(key): str_if_bytes(value) for key, value in r.items()}
+        if isinstance(r, dict)
+        else str_if_bytes(r)
+    ),
+    "XINFO CONSUMERS": lambda r: [
+        {str_if_bytes(key): value for key, value in x.items()} for x in r
+    ],
+    "XINFO GROUPS": lambda r: [
+        {str_if_bytes(key): value for key, value in d.items()} for d in r
+    ],
+}

+ 302 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/hiredis.py

@@ -0,0 +1,302 @@
+import asyncio
+import socket
+import sys
+from logging import getLogger
+from typing import Callable, List, Optional, TypedDict, Union
+
+if sys.version_info.major >= 3 and sys.version_info.minor >= 11:
+    from asyncio import timeout as async_timeout
+else:
+    from async_timeout import timeout as async_timeout
+
+from ..exceptions import ConnectionError, InvalidResponse, RedisError
+from ..typing import EncodableT
+from ..utils import HIREDIS_AVAILABLE
+from .base import (
+    AsyncBaseParser,
+    AsyncPushNotificationsParser,
+    BaseParser,
+    PushNotificationsParser,
+)
+from .socket import (
+    NONBLOCKING_EXCEPTION_ERROR_NUMBERS,
+    NONBLOCKING_EXCEPTIONS,
+    SENTINEL,
+    SERVER_CLOSED_CONNECTION_ERROR,
+)
+
+# Used to signal that hiredis-py does not have enough data to parse.
+# Using `False` or `None` is not reliable, given that the parser can
+# return `False` or `None` for legitimate reasons from RESP payloads.
+NOT_ENOUGH_DATA = object()
+
+
+class _HiredisReaderArgs(TypedDict, total=False):
+    """Keyword arguments accepted by ``hiredis.Reader``."""
+
+    protocolError: Callable[[str], Exception]
+    replyError: Callable[[str], Exception]
+    encoding: Optional[str]
+    errors: Optional[str]
+
+
+class _HiredisParser(BaseParser, PushNotificationsParser):
+    "Parser class for connections using Hiredis"
+
+    def __init__(self, socket_read_size):
+        if not HIREDIS_AVAILABLE:
+            raise RedisError("Hiredis is not installed")
+        self.socket_read_size = socket_read_size
+        # reusable receive buffer for recv_into()
+        self._buffer = bytearray(socket_read_size)
+        self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.node_moving_push_handler_func = None
+        self.maintenance_push_handler_func = None
+        self.oss_cluster_maint_push_handler_func = None
+        self.invalidation_push_handler_func = None
+        self._hiredis_PushNotificationType = None
+
+    def __del__(self):
+        # best-effort cleanup; never raise out of a destructor
+        try:
+            self.on_disconnect()
+        except Exception:
+            pass
+
+    def handle_pubsub_push_response(self, response):
+        """Default push handler: log the message and hand it back unchanged."""
+        logger = getLogger("push_response")
+        logger.debug("Push response: " + str(response))
+        return response
+
+    def on_connect(self, connection, **kwargs):
+        """Bind this parser to a freshly-connected socket and build the reader."""
+        import hiredis
+
+        self._sock = connection._sock
+        self._socket_timeout = connection.socket_timeout
+        kwargs = {
+            "protocolError": InvalidResponse,
+            "replyError": self.parse_error,
+            "errors": connection.encoder.encoding_errors,
+            "notEnoughData": NOT_ENOUGH_DATA,
+        }
+
+        if connection.encoder.decode_responses:
+            kwargs["encoding"] = connection.encoder.encoding
+        self._reader = hiredis.Reader(**kwargs)
+        self._next_response = NOT_ENOUGH_DATA
+
+        try:
+            self._hiredis_PushNotificationType = hiredis.PushNotification
+        except AttributeError:
+            # hiredis < 3.2
+            self._hiredis_PushNotificationType = None
+
+    def on_disconnect(self):
+        self._sock = None
+        self._reader = None
+        self._next_response = NOT_ENOUGH_DATA
+
+    def can_read(self, timeout):
+        """Return True when a reply is available or more data could be read."""
+        if not self._reader:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+
+        # cache a fully-parsed reply so the next read_response() returns it
+        if self._next_response is NOT_ENOUGH_DATA:
+            self._next_response = self._reader.gets()
+            if self._next_response is NOT_ENOUGH_DATA:
+                return self.read_from_socket(timeout=timeout, raise_on_timeout=False)
+        return True
+
+    def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
+        """Read one chunk from the socket into the hiredis reader; returns True
+        when data was fed, False on a tolerated timeout/non-blocking miss."""
+        sock = self._sock
+        custom_timeout = timeout is not SENTINEL
+        try:
+            if custom_timeout:
+                sock.settimeout(timeout)
+            bufflen = self._sock.recv_into(self._buffer)
+            if bufflen == 0:
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+            self._reader.feed(self._buffer, 0, bufflen)
+            # data was read from the socket and added to the buffer.
+            # return True to indicate that data was read.
+            return True
+        except socket.timeout:
+            if raise_on_timeout:
+                raise TimeoutError("Timeout reading from socket")
+            return False
+        except NONBLOCKING_EXCEPTIONS as ex:
+            # if we're in nonblocking mode and the recv raises a
+            # blocking error, simply return False indicating that
+            # there's no data to be read. otherwise raise the
+            # original exception.
+            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
+            if not raise_on_timeout and ex.errno == allowed:
+                return False
+            raise ConnectionError(f"Error while reading from socket: {ex.args}")
+        finally:
+            # always restore the connection's configured timeout
+            if custom_timeout:
+                sock.settimeout(self._socket_timeout)
+
+    def read_response(self, disable_decoding=False, push_request=False):
+        """Return the next parsed reply, transparently dispatching push
+        notifications and recursing until a regular reply arrives (unless
+        the caller explicitly asked for a push reply)."""
+        if not self._reader:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+
+        # _next_response might be cached from a can_read() call
+        if self._next_response is not NOT_ENOUGH_DATA:
+            response = self._next_response
+            self._next_response = NOT_ENOUGH_DATA
+            if self._hiredis_PushNotificationType is not None and isinstance(
+                response, self._hiredis_PushNotificationType
+            ):
+                response = self.handle_push_response(response)
+
+                # if this is a push request return the push response
+                if push_request:
+                    return response
+
+                return self.read_response(
+                    disable_decoding=disable_decoding,
+                    push_request=push_request,
+                )
+            return response
+
+        if disable_decoding:
+            response = self._reader.gets(False)
+        else:
+            response = self._reader.gets()
+
+        # keep feeding socket data until hiredis has a complete reply
+        while response is NOT_ENOUGH_DATA:
+            self.read_from_socket()
+            if disable_decoding:
+                response = self._reader.gets(False)
+            else:
+                response = self._reader.gets()
+        # if the response is a ConnectionError or the response is a list and
+        # the first item is a ConnectionError, raise it as something bad
+        # happened
+        if isinstance(response, ConnectionError):
+            raise response
+        elif self._hiredis_PushNotificationType is not None and isinstance(
+            response, self._hiredis_PushNotificationType
+        ):
+            response = self.handle_push_response(response)
+            if push_request:
+                return response
+            return self.read_response(
+                disable_decoding=disable_decoding,
+                push_request=push_request,
+            )
+
+        elif (
+            isinstance(response, list)
+            and response
+            and isinstance(response[0], ConnectionError)
+        ):
+            raise response[0]
+        return response
+
+
+class _AsyncHiredisParser(AsyncBaseParser, AsyncPushNotificationsParser):
+    """Async implementation of parser class for connections using Hiredis"""
+
+    __slots__ = ("_reader",)
+
+    def __init__(self, socket_read_size: int):
+        if not HIREDIS_AVAILABLE:
+            raise RedisError("Hiredis is not available.")
+        super().__init__(socket_read_size=socket_read_size)
+        self._reader = None
+        self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.invalidation_push_handler_func = None
+        self._hiredis_PushNotificationType = None
+
+    async def handle_pubsub_push_response(self, response):
+        """Default push handler: log the message and hand it back unchanged."""
+        logger = getLogger("push_response")
+        logger.debug("Push response: " + str(response))
+        return response
+
+    def on_connect(self, connection):
+        """Bind this parser to the connection's stream and build the reader."""
+        import hiredis
+
+        self._stream = connection._reader
+        kwargs: _HiredisReaderArgs = {
+            "protocolError": InvalidResponse,
+            "replyError": self.parse_error,
+            "notEnoughData": NOT_ENOUGH_DATA,
+        }
+        if connection.encoder.decode_responses:
+            kwargs["encoding"] = connection.encoder.encoding
+            kwargs["errors"] = connection.encoder.encoding_errors
+
+        self._reader = hiredis.Reader(**kwargs)
+        self._connected = True
+
+        # NOTE(review): getattr already defaults to None, so the except clause
+        # below is redundant (getattr with a default never raises
+        # AttributeError); kept as defensive code for hiredis < 3.2.
+        try:
+            self._hiredis_PushNotificationType = getattr(
+                hiredis, "PushNotification", None
+            )
+        except AttributeError:
+            # hiredis < 3.2
+            self._hiredis_PushNotificationType = None
+
+    def on_disconnect(self):
+        self._connected = False
+
+    async def can_read_destructive(self):
+        """Return True when a reply is buffered or a zero-timeout read got data."""
+        if not self._connected:
+            raise OSError("Buffer is closed.")
+        # NOTE: this gets() consumes a buffered reply if one is complete
+        if self._reader.gets() is not NOT_ENOUGH_DATA:
+            return True
+        try:
+            async with async_timeout(0):
+                return await self.read_from_socket()
+        except asyncio.TimeoutError:
+            return False
+
+    async def read_from_socket(self):
+        """Read one chunk from the stream into the hiredis reader."""
+        buffer = await self._stream.read(self._read_size)
+        if not buffer or not isinstance(buffer, bytes):
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
+        self._reader.feed(buffer)
+        # data was read from the socket and added to the buffer.
+        # return True to indicate that data was read.
+        return True
+
+    async def read_response(
+        self, disable_decoding: bool = False, push_request: bool = False
+    ) -> Union[EncodableT, List[EncodableT]]:
+        """Return the next parsed reply, dispatching push notifications and
+        recursing until a regular reply arrives (unless push_request)."""
+        # If `on_disconnect()` has been called, prohibit any more reads
+        # even if they could happen because data might be present.
+        # We still allow reads in progress to finish
+        if not self._connected:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
+
+        if disable_decoding:
+            response = self._reader.gets(False)
+        else:
+            response = self._reader.gets()
+
+        # keep feeding stream data until hiredis has a complete reply
+        while response is NOT_ENOUGH_DATA:
+            await self.read_from_socket()
+            if disable_decoding:
+                response = self._reader.gets(False)
+            else:
+                response = self._reader.gets()
+
+        # if the response is a ConnectionError or the response is a list and
+        # the first item is a ConnectionError, raise it as something bad
+        # happened
+        if isinstance(response, ConnectionError):
+            raise response
+        elif self._hiredis_PushNotificationType is not None and isinstance(
+            response, self._hiredis_PushNotificationType
+        ):
+            response = await self.handle_push_response(response)
+            if not push_request:
+                return await self.read_response(
+                    disable_decoding=disable_decoding, push_request=push_request
+                )
+            else:
+                return response
+        elif (
+            isinstance(response, list)
+            and response
+            and isinstance(response[0], ConnectionError)
+        ):
+            raise response[0]
+        return response

+ 132 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/resp2.py

@@ -0,0 +1,132 @@
+from typing import Any, Union
+
+from ..exceptions import ConnectionError, InvalidResponse, ResponseError
+from ..typing import EncodableT
+from .base import _AsyncRESPBase, _RESPBase
+from .socket import SERVER_CLOSED_CONNECTION_ERROR
+
+
+class _RESP2Parser(_RESPBase):
+    """RESP2 protocol implementation"""
+
+    def read_response(self, disable_decoding=False):
+        pos = self._buffer.get_pos() if self._buffer else None
+        try:
+            result = self._read_response(disable_decoding=disable_decoding)
+        except BaseException:
+            if self._buffer:
+                self._buffer.rewind(pos)
+            raise
+        else:
+            self._buffer.purge()
+            return result
+
+    def _read_response(self, disable_decoding=False):
+        raw = self._buffer.readline()
+        if not raw:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+
+        byte, response = raw[:1], raw[1:]
+
+        # server returned an error
+        if byte == b"-":
+            response = response.decode("utf-8", errors="replace")
+            error = self.parse_error(response)
+            # if the error is a ConnectionError, raise immediately so the user
+            # is notified
+            if isinstance(error, ConnectionError):
+                raise error
+            # otherwise, we're dealing with a ResponseError that might belong
+            # inside a pipeline response. the connection's read_response()
+            # and/or the pipeline's execute() will raise this error if
+            # necessary, so just return the exception instance here.
+            return error
+        # single value
+        elif byte == b"+":
+            pass
+        # int value
+        elif byte == b":":
+            return int(response)
+        # bulk response
+        elif byte == b"$" and response == b"-1":
+            return None
+        elif byte == b"$":
+            response = self._buffer.read(int(response))
+        # multi-bulk response
+        elif byte == b"*" and response == b"-1":
+            return None
+        elif byte == b"*":
+            response = [
+                self._read_response(disable_decoding=disable_decoding)
+                for i in range(int(response))
+            ]
+        else:
+            raise InvalidResponse(f"Protocol Error: {raw!r}")
+
+        if disable_decoding is False:
+            response = self.encoder.decode(response)
+        return response
+
+
+class _AsyncRESP2Parser(_AsyncRESPBase):
+    """Async class for the RESP2 protocol"""
+
+    async def read_response(self, disable_decoding: bool = False):
+        if not self._connected:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+        if self._chunks:
+            # augment parsing buffer with previously read data
+            self._buffer += b"".join(self._chunks)
+            self._chunks.clear()
+        self._pos = 0
+        response = await self._read_response(disable_decoding=disable_decoding)
+        # Successfully parsing a response allows us to clear our parsing buffer
+        self._clear()
+        return response
+
+    async def _read_response(
+        self, disable_decoding: bool = False
+    ) -> Union[EncodableT, ResponseError, None]:
+        raw = await self._readline()
+        response: Any
+        byte, response = raw[:1], raw[1:]
+
+        # server returned an error
+        if byte == b"-":
+            response = response.decode("utf-8", errors="replace")
+            error = self.parse_error(response)
+            # if the error is a ConnectionError, raise immediately so the user
+            # is notified
+            if isinstance(error, ConnectionError):
+                self._clear()  # Successful parse
+                raise error
+            # otherwise, we're dealing with a ResponseError that might belong
+            # inside a pipeline response. the connection's read_response()
+            # and/or the pipeline's execute() will raise this error if
+            # necessary, so just return the exception instance here.
+            return error
+        # single value
+        elif byte == b"+":
+            pass
+        # int value
+        elif byte == b":":
+            return int(response)
+        # bulk response
+        elif byte == b"$" and response == b"-1":
+            return None
+        elif byte == b"$":
+            response = await self._read(int(response))
+        # multi-bulk response
+        elif byte == b"*" and response == b"-1":
+            return None
+        elif byte == b"*":
+            response = [
+                (await self._read_response(disable_decoding))
+                for _ in range(int(response))  # noqa
+            ]
+        else:
+            raise InvalidResponse(f"Protocol Error: {raw!r}")
+
+        if disable_decoding is False:
+            response = self.encoder.decode(response)
+        return response

+ 270 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/resp3.py

@@ -0,0 +1,270 @@
+from logging import getLogger
+from typing import Any, Union
+
+from ..exceptions import ConnectionError, InvalidResponse, ResponseError
+from ..typing import EncodableT
+from .base import (
+    AsyncPushNotificationsParser,
+    PushNotificationsParser,
+    _AsyncRESPBase,
+    _RESPBase,
+)
+from .socket import SERVER_CLOSED_CONNECTION_ERROR
+
+
+class _RESP3Parser(_RESPBase, PushNotificationsParser):
+    """RESP3 protocol implementation"""
+
+    def __init__(self, socket_read_size):
+        super().__init__(socket_read_size)
+        # Only the pub/sub push handler has a default; the remaining
+        # handlers start unset (presumably registered later by the client —
+        # registration is not visible in this module).
+        self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.node_moving_push_handler_func = None
+        self.maintenance_push_handler_func = None
+        self.oss_cluster_maint_push_handler_func = None
+        self.invalidation_push_handler_func = None
+
+    def handle_pubsub_push_response(self, response):
+        """Default push handler: log at debug level, return unchanged."""
+        logger = getLogger("push_response")
+        logger.debug("Push response: " + str(response))
+        return response
+
+    def read_response(self, disable_decoding=False, push_request=False):
+        """Parse and return one complete RESP3 reply.
+
+        On any parse failure the buffer is rewound to the start of the
+        reply so a retry can re-read it; on success the consumed bytes are
+        purged (tolerating a buffer cleared concurrently by disconnect).
+        """
+        pos = self._buffer.get_pos() if self._buffer is not None else None
+        try:
+            result = self._read_response(
+                disable_decoding=disable_decoding, push_request=push_request
+            )
+        except BaseException:
+            if self._buffer is not None:
+                self._buffer.rewind(pos)
+            raise
+        else:
+            if self._buffer is not None:
+                try:
+                    self._buffer.purge()
+                except AttributeError:
+                    # Buffer may have been set to None by another thread after
+                    # the check above; result is still valid so we don't raise
+                    pass
+            return result
+
+    def _read_response(self, disable_decoding=False, push_request=False):
+        """Recursively parse a single RESP3-typed value from the buffer.
+
+        Args:
+            disable_decoding: when True, skip decoding bulk payloads via
+                the encoder.
+            push_request: when True, a push (``>``) message is returned
+                directly instead of being consumed.
+
+        Raises:
+            ConnectionError: closed connection, or a connection-level error
+                reply.
+            InvalidResponse: unknown RESP3 type marker.
+        """
+        raw = self._buffer.readline()
+        if not raw:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+
+        byte, response = raw[:1], raw[1:]
+
+        # server returned an error
+        if byte in (b"-", b"!"):
+            # "!" is a blob error: length-prefixed, payload follows.
+            if byte == b"!":
+                response = self._buffer.read(int(response))
+            response = response.decode("utf-8", errors="replace")
+            error = self.parse_error(response)
+            # if the error is a ConnectionError, raise immediately so the user
+            # is notified
+            if isinstance(error, ConnectionError):
+                raise error
+            # otherwise, we're dealing with a ResponseError that might belong
+            # inside a pipeline response. the connection's read_response()
+            # and/or the pipeline's execute() will raise this error if
+            # necessary, so just return the exception instance here.
+            return error
+        # single value
+        elif byte == b"+":
+            pass
+        # null value
+        elif byte == b"_":
+            return None
+        # int and big int values
+        elif byte in (b":", b"("):
+            return int(response)
+        # double value
+        elif byte == b",":
+            return float(response)
+        # bool value
+        elif byte == b"#":
+            return response == b"t"
+        # bulk response
+        elif byte == b"$":
+            response = self._buffer.read(int(response))
+        # verbatim string response
+        elif byte == b"=":
+            # skip the leading 4 bytes (the "xxx:" format prefix defined by
+            # RESP3 verbatim strings)
+            response = self._buffer.read(int(response))[4:]
+        # array response
+        elif byte == b"*":
+            response = [
+                self._read_response(disable_decoding=disable_decoding)
+                for _ in range(int(response))
+            ]
+        # set response
+        elif byte == b"~":
+            # redis can return unhashable types (like dict) in a set,
+            # so we return sets as list, all the time, for predictability
+            response = [
+                self._read_response(disable_decoding=disable_decoding)
+                for _ in range(int(response))
+            ]
+        # map response
+        elif byte == b"%":
+            # We cannot use a dict-comprehension to parse stream.
+            # Evaluation order of key:val expression in dict comprehension only
+            # became defined to be left-right in version 3.8
+            resp_dict = {}
+            for _ in range(int(response)):
+                key = self._read_response(disable_decoding=disable_decoding)
+                resp_dict[key] = self._read_response(
+                    disable_decoding=disable_decoding, push_request=push_request
+                )
+            response = resp_dict
+        # push response
+        elif byte == b">":
+            response = [
+                self._read_response(
+                    disable_decoding=disable_decoding, push_request=push_request
+                )
+                for _ in range(int(response))
+            ]
+            response = self.handle_push_response(response)
+
+            # if this is a push request return the push response
+            if push_request:
+                return response
+
+            # otherwise the push message was consumed; parse and return the
+            # next regular reply for the caller
+            return self._read_response(
+                disable_decoding=disable_decoding,
+                push_request=push_request,
+            )
+        else:
+            raise InvalidResponse(f"Protocol Error: {raw!r}")
+
+        if isinstance(response, bytes) and disable_decoding is False:
+            response = self.encoder.decode(response)
+
+        return response
+
+
+class _AsyncRESP3Parser(_AsyncRESPBase, AsyncPushNotificationsParser):
+    """Async RESP3 protocol implementation."""
+
+    def __init__(self, socket_read_size):
+        super().__init__(socket_read_size)
+        # Default pub/sub push handler; invalidation handler starts unset.
+        self.pubsub_push_handler_func = self.handle_pubsub_push_response
+        self.invalidation_push_handler_func = None
+
+    async def handle_pubsub_push_response(self, response):
+        """Default push handler: log at debug level, return unchanged."""
+        logger = getLogger("push_response")
+        logger.debug("Push response: " + str(response))
+        return response
+
+    async def read_response(
+        self, disable_decoding: bool = False, push_request: bool = False
+    ):
+        """Parse and return one complete RESP3 reply from buffered data."""
+        if self._chunks:
+            # augment parsing buffer with previously read data
+            self._buffer += b"".join(self._chunks)
+            self._chunks.clear()
+        self._pos = 0
+        response = await self._read_response(
+            disable_decoding=disable_decoding, push_request=push_request
+        )
+        # Successfully parsing a response allows us to clear our parsing buffer
+        self._clear()
+        return response
+
+    async def _read_response(
+        self, disable_decoding: bool = False, push_request: bool = False
+    ) -> Union[EncodableT, ResponseError, None]:
+        """Recursively parse a single RESP3-typed value from the buffer.
+
+        Args:
+            disable_decoding: when True, skip decoding bulk payloads via
+                the encoder.
+            push_request: when True, a push (``>``) message is returned
+                directly instead of being consumed.
+
+        Raises:
+            ConnectionError: stream/encoder gone (disconnected), or a
+                connection-level error reply.
+            InvalidResponse: unknown RESP3 type marker.
+        """
+        if not self._stream or not self.encoder:
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+        raw = await self._readline()
+        response: Any
+        byte, response = raw[:1], raw[1:]
+
+        # server returned an error
+        if byte in (b"-", b"!"):
+            # "!" is a blob error: length-prefixed, payload follows.
+            if byte == b"!":
+                response = await self._read(int(response))
+            response = response.decode("utf-8", errors="replace")
+            error = self.parse_error(response)
+            # if the error is a ConnectionError, raise immediately so the user
+            # is notified
+            if isinstance(error, ConnectionError):
+                self._clear()  # Successful parse
+                raise error
+            # otherwise, we're dealing with a ResponseError that might belong
+            # inside a pipeline response. the connection's read_response()
+            # and/or the pipeline's execute() will raise this error if
+            # necessary, so just return the exception instance here.
+            return error
+        # single value
+        elif byte == b"+":
+            pass
+        # null value
+        elif byte == b"_":
+            return None
+        # int and big int values
+        elif byte in (b":", b"("):
+            return int(response)
+        # double value
+        elif byte == b",":
+            return float(response)
+        # bool value
+        elif byte == b"#":
+            return response == b"t"
+        # bulk response
+        elif byte == b"$":
+            response = await self._read(int(response))
+        # verbatim string response
+        elif byte == b"=":
+            # skip the leading 4 bytes (the "xxx:" format prefix defined by
+            # RESP3 verbatim strings)
+            response = (await self._read(int(response)))[4:]
+        # array response
+        elif byte == b"*":
+            response = [
+                (await self._read_response(disable_decoding=disable_decoding))
+                for _ in range(int(response))
+            ]
+        # set response
+        elif byte == b"~":
+            # redis can return unhashable types (like dict) in a set,
+            # so we always convert to a list, to have predictable return types
+            response = [
+                (await self._read_response(disable_decoding=disable_decoding))
+                for _ in range(int(response))
+            ]
+        # map response
+        elif byte == b"%":
+            # We cannot use a dict-comprehension to parse stream.
+            # Evaluation order of key:val expression in dict comprehension only
+            # became defined to be left-right in version 3.8
+            resp_dict = {}
+            for _ in range(int(response)):
+                key = await self._read_response(disable_decoding=disable_decoding)
+                resp_dict[key] = await self._read_response(
+                    disable_decoding=disable_decoding, push_request=push_request
+                )
+            response = resp_dict
+        # push response
+        elif byte == b">":
+            response = [
+                (
+                    await self._read_response(
+                        disable_decoding=disable_decoding, push_request=push_request
+                    )
+                )
+                for _ in range(int(response))
+            ]
+            response = await self.handle_push_response(response)
+            if not push_request:
+                # Push message consumed; parse and return the next regular
+                # reply for the caller.
+                return await self._read_response(
+                    disable_decoding=disable_decoding, push_request=push_request
+                )
+            else:
+                return response
+        else:
+            raise InvalidResponse(f"Protocol Error: {raw!r}")
+
+        if isinstance(response, bytes) and disable_decoding is False:
+            response = self.encoder.decode(response)
+        return response

+ 162 - 0
usr/local/lib/python3.10/dist-packages/redis/_parsers/socket.py

@@ -0,0 +1,162 @@
+import errno
+import io
+import socket
+from io import SEEK_END
+from typing import Optional, Union
+
+from ..exceptions import ConnectionError, TimeoutError
+from ..utils import SSL_AVAILABLE
+
+# Maps "no data available right now" exception types to the errno value
+# that marks them as benign in non-blocking mode.
+NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {BlockingIOError: errno.EWOULDBLOCK}
+
+if SSL_AVAILABLE:
+    import ssl
+
+    # Older ssl modules lack the dedicated want-read/want-write classes and
+    # raise plain SSLError instead.
+    if hasattr(ssl, "SSLWantReadError"):
+        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2
+        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2
+    else:
+        NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2
+
+# Tuple form for use in `except` clauses.
+NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
+
+SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
+# Sentinel meaning "no custom timeout supplied" (None is a valid timeout).
+SENTINEL = object()
+
+SYM_CRLF = b"\r\n"  # RESP line terminator
+
+
+class SocketBuffer:
+    def __init__(
+        self, socket: socket.socket, socket_read_size: int, socket_timeout: float
+    ):
+        self._sock = socket
+        self.socket_read_size = socket_read_size
+        self.socket_timeout = socket_timeout
+        self._buffer = io.BytesIO()
+
+    def unread_bytes(self) -> int:
+        """
+        Remaining unread length of buffer
+        """
+        pos = self._buffer.tell()
+        end = self._buffer.seek(0, SEEK_END)
+        self._buffer.seek(pos)
+        return end - pos
+
+    def _read_from_socket(
+        self,
+        length: Optional[int] = None,
+        timeout: Union[float, object] = SENTINEL,
+        raise_on_timeout: Optional[bool] = True,
+    ) -> bool:
+        sock = self._sock
+        socket_read_size = self.socket_read_size
+        marker = 0
+        custom_timeout = timeout is not SENTINEL
+
+        buf = self._buffer
+        current_pos = buf.tell()
+        buf.seek(0, SEEK_END)
+        if custom_timeout:
+            sock.settimeout(timeout)
+        try:
+            while True:
+                data = sock.recv(socket_read_size)
+                # an empty string indicates the server shutdown the socket
+                if isinstance(data, bytes) and len(data) == 0:
+                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+                buf.write(data)
+                data_length = len(data)
+                marker += data_length
+
+                if length is not None and length > marker:
+                    continue
+                return True
+        except socket.timeout:
+            if raise_on_timeout:
+                raise TimeoutError("Timeout reading from socket")
+            return False
+        except NONBLOCKING_EXCEPTIONS as ex:
+            # if we're in nonblocking mode and the recv raises a
+            # blocking error, simply return False indicating that
+            # there's no data to be read. otherwise raise the
+            # original exception.
+            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
+            if not raise_on_timeout and ex.errno == allowed:
+                return False
+            raise ConnectionError(f"Error while reading from socket: {ex.args}")
+        finally:
+            buf.seek(current_pos)
+            if custom_timeout:
+                sock.settimeout(self.socket_timeout)
+
+    def can_read(self, timeout: float) -> bool:
+        return bool(self.unread_bytes()) or self._read_from_socket(
+            timeout=timeout, raise_on_timeout=False
+        )
+
+    def read(self, length: int) -> bytes:
+        length = length + 2  # make sure to read the \r\n terminator
+        # BufferIO will return less than requested if buffer is short
+        data = self._buffer.read(length)
+        missing = length - len(data)
+        if missing:
+            # fill up the buffer and read the remainder
+            self._read_from_socket(missing)
+            data += self._buffer.read(missing)
+        return data[:-2]
+
+    def readline(self) -> bytes:
+        buf = self._buffer
+        data = buf.readline()
+        while not data.endswith(SYM_CRLF):
+            # there's more data in the socket that we need
+            self._read_from_socket()
+            data += buf.readline()
+
+        return data[:-2]
+
+    def get_pos(self) -> int:
+        """
+        Get current read position
+        """
+        return self._buffer.tell()
+
+    def rewind(self, pos: int) -> None:
+        """
+        Rewind the buffer to a specific position, to re-start reading
+        """
+        self._buffer.seek(pos)
+
+    def purge(self) -> None:
+        """
+        After a successful read, purge the read part of buffer
+        """
+        unread = self.unread_bytes()
+
+        # Only if we have read all of the buffer do we truncate, to
+        # reduce the amount of memory thrashing.  This heuristic
+        # can be changed or removed later.
+        if unread > 0:
+            return
+
+        if unread > 0:
+            # move unread data to the front
+            view = self._buffer.getbuffer()
+            view[:unread] = view[-unread:]
+        self._buffer.truncate(unread)
+        self._buffer.seek(0)
+
+    def close(self) -> None:
+        try:
+            self._buffer.close()
+        except Exception:
+            # issue #633 suggests the purge/close somehow raised a
+            # BadFileDescriptor error. Perhaps the client ran out of
+            # memory or something else? It's probably OK to ignore
+            # any error being raised from purge/close since we're
+            # removing the reference to the instance below.
+            pass
+        self._buffer = None
+        self._sock = None

+ 64 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/__init__.py

@@ -0,0 +1,64 @@
+from redis.asyncio.client import Redis, StrictRedis
+from redis.asyncio.cluster import RedisCluster
+from redis.asyncio.connection import (
+    BlockingConnectionPool,
+    Connection,
+    ConnectionPool,
+    SSLConnection,
+    UnixDomainSocketConnection,
+)
+from redis.asyncio.sentinel import (
+    Sentinel,
+    SentinelConnectionPool,
+    SentinelManagedConnection,
+    SentinelManagedSSLConnection,
+)
+from redis.asyncio.utils import from_url
+from redis.backoff import default_backoff
+from redis.exceptions import (
+    AuthenticationError,
+    AuthenticationWrongNumberOfArgsError,
+    BusyLoadingError,
+    ChildDeadlockedError,
+    ConnectionError,
+    DataError,
+    InvalidResponse,
+    OutOfMemoryError,
+    PubSubError,
+    ReadOnlyError,
+    RedisError,
+    ResponseError,
+    TimeoutError,
+    WatchError,
+)
+
+# Public API of redis.asyncio: async clients, connection pools, sentinel
+# helpers, and the shared exception hierarchy re-exported above.
+__all__ = [
+    "AuthenticationError",
+    "AuthenticationWrongNumberOfArgsError",
+    "BlockingConnectionPool",
+    "BusyLoadingError",
+    "ChildDeadlockedError",
+    "Connection",
+    "ConnectionError",
+    "ConnectionPool",
+    "DataError",
+    "from_url",
+    "default_backoff",
+    "InvalidResponse",
+    "PubSubError",
+    "OutOfMemoryError",
+    "ReadOnlyError",
+    "Redis",
+    "RedisCluster",
+    "RedisError",
+    "ResponseError",
+    "Sentinel",
+    "SentinelConnectionPool",
+    "SentinelManagedConnection",
+    "SentinelManagedSSLConnection",
+    "SSLConnection",
+    "StrictRedis",
+    "TimeoutError",
+    "UnixDomainSocketConnection",
+    "WatchError",
+]

binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/__init__.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/client.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/cluster.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/connection.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/lock.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/retry.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/sentinel.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/__pycache__/utils.cpython-310.pyc


Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 1951 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/client.py


Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 2957 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/cluster.py


Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 1717 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/connection.py


+ 0 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__init__.py


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__pycache__/__init__.cpython-310.pyc


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/http/__pycache__/http_client.cpython-310.pyc


+ 265 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/http/http_client.py

@@ -0,0 +1,265 @@
+import asyncio
+from abc import ABC, abstractmethod
+from concurrent.futures import ThreadPoolExecutor
+from typing import Any, Mapping, Optional, Union
+
+from redis.http.http_client import HttpClient, HttpResponse
+
+# Default User-Agent string (presumably used as a request header default by
+# the underlying HttpClient — usage is not visible in this module).
+DEFAULT_USER_AGENT = "HttpClient/1.0 (+https://example.invalid)"
+# Default per-request timeout, in seconds.
+DEFAULT_TIMEOUT = 30.0
+# Status codes considered retryable (throttling / transient server errors);
+# NOTE(review): confirm against the sync HttpClient's retry logic.
+RETRY_STATUS_CODES = {429, 500, 502, 503, 504}
+
+
+class AsyncHTTPClient(ABC):
+    """Abstract interface for an asynchronous HTTP client.
+
+    All methods accept optional query ``params``, extra ``headers`` and a
+    per-call ``timeout``; when ``expect_json`` is True implementations may
+    return the decoded body instead of a raw ``HttpResponse``.
+    """
+
+    @abstractmethod
+    async def get(
+        self,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        """
+        Invoke an HTTP GET request against *path*."""
+        pass
+
+    @abstractmethod
+    async def delete(
+        self,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        """
+        Invoke an HTTP DELETE request against *path*."""
+        pass
+
+    @abstractmethod
+    async def post(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        """
+        Invoke an HTTP POST request with *json_body* or raw *data*."""
+        pass
+
+    @abstractmethod
+    async def put(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        """
+        Invoke an HTTP PUT request with *json_body* or raw *data*."""
+        pass
+
+    @abstractmethod
+    async def patch(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        """
+        Invoke an HTTP PATCH request with *json_body* or raw *data*."""
+        pass
+
+    @abstractmethod
+    async def request(
+        self,
+        method: str,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        body: Optional[Union[bytes, str]] = None,
+        timeout: Optional[float] = None,
+    ) -> HttpResponse:
+        """
+        Invoke an HTTP request with the given *method*; always returns the
+        raw HttpResponse."""
+        pass
+
+
+class AsyncHTTPClientWrapper(AsyncHTTPClient):
+    """
+    An async wrapper around sync HTTP client with thread pool execution.
+    """
+
+    def __init__(self, client: HttpClient, max_workers: int = 10) -> None:
+        """
+        Initialize a new HTTP client instance.
+
+        Args:
+            client: Sync HTTP client instance.
+            max_workers: Maximum number of concurrent requests.
+
+        The client supports both regular HTTPS with server verification and mutual TLS
+        authentication. For server verification, provide CA certificate information via
+        ca_file, ca_path or ca_data. For mutual TLS, additionally provide a client
+        certificate and key via client_cert_file and client_key_file.
+        """
+        self.client = client
+        self._executor = ThreadPoolExecutor(max_workers=max_workers)
+
+    async def get(
+        self,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor, self.client.get, path, params, headers, timeout, expect_json
+        )
+
+    async def delete(
+        self,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor,
+            self.client.delete,
+            path,
+            params,
+            headers,
+            timeout,
+            expect_json,
+        )
+
+    async def post(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor,
+            self.client.post,
+            path,
+            json_body,
+            data,
+            params,
+            headers,
+            timeout,
+            expect_json,
+        )
+
+    async def put(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor,
+            self.client.put,
+            path,
+            json_body,
+            data,
+            params,
+            headers,
+            timeout,
+            expect_json,
+        )
+
+    async def patch(
+        self,
+        path: str,
+        json_body: Optional[Any] = None,
+        data: Optional[Union[bytes, str]] = None,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        timeout: Optional[float] = None,
+        expect_json: bool = True,
+    ) -> Union[HttpResponse, Any]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor,
+            self.client.patch,
+            path,
+            json_body,
+            data,
+            params,
+            headers,
+            timeout,
+            expect_json,
+        )
+
+    async def request(
+        self,
+        method: str,
+        path: str,
+        params: Optional[
+            Mapping[str, Union[None, str, int, float, bool, list, tuple]]
+        ] = None,
+        headers: Optional[Mapping[str, str]] = None,
+        body: Optional[Union[bytes, str]] = None,
+        timeout: Optional[float] = None,
+    ) -> HttpResponse:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            self._executor,
+            self.client.request,
+            method,
+            path,
+            params,
+            headers,
+            body,
+            timeout,
+        )

+ 345 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/lock.py

@@ -0,0 +1,345 @@
+import asyncio
+import logging
+import threading
+import uuid
+from types import SimpleNamespace
+from typing import TYPE_CHECKING, Awaitable, Optional, Union
+
+from redis.exceptions import LockError, LockNotOwnedError
+from redis.typing import Number
+
+if TYPE_CHECKING:
+    from redis.asyncio import Redis, RedisCluster
+
+logger = logging.getLogger(__name__)
+
+
class Lock:
    """
    A shared, distributed Lock. Using Redis for locking allows the Lock
    to be shared across processes and/or machines.

    It's left to the user to resolve deadlock issues and make sure
    multiple clients play nicely together.
    """

    # Cached Script objects. They are registered lazily by the first Lock
    # instantiated (see register_scripts) and then shared class-wide, so all
    # Lock instances reuse the same registered scripts.
    lua_release = None
    lua_extend = None
    lua_reacquire = None

    # KEYS[1] - lock name
    # ARGV[1] - token
    # return 1 if the lock was released, otherwise 0
    LUA_RELEASE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('del', KEYS[1])
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - additional milliseconds
    # ARGV[3] - "0" if the additional time should be added to the lock's
    #           existing ttl or "1" if the existing ttl should be replaced
    # return 1 if the locks time was extended, otherwise 0
    LUA_EXTEND_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        local expiration = redis.call('pttl', KEYS[1])
        if not expiration then
            expiration = 0
        end
        if expiration < 0 then
            return 0
        end

        local newttl = ARGV[2]
        if ARGV[3] == "0" then
            newttl = ARGV[2] + expiration
        end
        redis.call('pexpire', KEYS[1], newttl)
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - milliseconds
    # return 1 if the locks time was reacquired, otherwise 0
    LUA_REACQUIRE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('pexpire', KEYS[1], ARGV[2])
        return 1
    """

    def __init__(
        self,
        redis: Union["Redis", "RedisCluster"],
        name: Union[str, bytes, memoryview],
        timeout: Optional[float] = None,
        sleep: float = 0.1,
        blocking: bool = True,
        blocking_timeout: Optional[Number] = None,
        thread_local: bool = True,
        raise_on_release_error: bool = True,
    ):
        """
        Create a new Lock instance named ``name`` using the Redis client
        supplied by ``redis``.

        ``timeout`` indicates a maximum life for the lock in seconds.
        By default, it will remain locked until release() is called.
        ``timeout`` can be specified as a float or integer, both representing
        the number of seconds to wait.

        ``sleep`` indicates the amount of time to sleep in seconds per loop
        iteration when the lock is in blocking mode and another client is
        currently holding the lock.

        ``blocking`` indicates whether calling ``acquire`` should block until
        the lock has been acquired or to fail immediately, causing ``acquire``
        to return False and the lock not being acquired. Defaults to True.
        Note this value can be overridden by passing a ``blocking``
        argument to ``acquire``.

        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.

        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:

            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
                     thread-1 sets the token to "abc"
            time: 1, thread-2 blocks trying to acquire `my-lock` using the
                     Lock instance.
            time: 5, thread-1 has not yet completed. redis expires the lock
                     key.
            time: 5, thread-2 acquired `my-lock` now that it's available.
                     thread-2 sets the token to "xyz"
            time: 6, thread-1 finishes its work and calls release(). if the
                     token is *not* stored in thread local storage, then
                     thread-1 would see the token value as "xyz" and would be
                     able to successfully release the thread-2's lock.

        ``raise_on_release_error`` indicates whether to raise an exception when
        the lock is no longer owned when exiting the context manager. By default,
        this is True, meaning an exception will be raised. If False, the warning
        will be logged and the exception will be suppressed.

        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage.
        """
        self.redis = redis
        self.name = name
        self.timeout = timeout
        self.sleep = sleep
        self.blocking = blocking
        self.blocking_timeout = blocking_timeout
        self.thread_local = bool(thread_local)
        # Token storage: thread-local by default so each thread only sees its
        # own token (see the timeline in the docstring above); SimpleNamespace
        # gives the same `.token` attribute interface when sharing is desired.
        self.local = threading.local() if self.thread_local else SimpleNamespace()
        self.raise_on_release_error = raise_on_release_error
        self.local.token = None
        self.register_scripts()

    def register_scripts(self):
        # Scripts are cached on the class, so registration happens only once
        # (for the first Lock created) and uses that instance's client.
        cls = self.__class__
        client = self.redis
        if cls.lua_release is None:
            cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
        if cls.lua_extend is None:
            cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
        if cls.lua_reacquire is None:
            cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)

    async def __aenter__(self):
        # `async with lock:` — acquire on entry using the instance's
        # blocking/blocking_timeout settings; fail loudly if not acquired.
        if await self.acquire():
            return self
        raise LockError("Unable to acquire lock within the time specified")

    async def __aexit__(self, exc_type, exc_value, traceback):
        try:
            await self.release()
        except LockError:
            # The lock may have expired or been taken over while we held it;
            # either raise (default) or just warn, per raise_on_release_error.
            if self.raise_on_release_error:
                raise
            logger.warning(
                "Lock was unlocked or no longer owned when exiting context manager."
            )

    async def acquire(
        self,
        blocking: Optional[bool] = None,
        blocking_timeout: Optional[Number] = None,
        token: Optional[Union[str, bytes]] = None,
    ):
        """
        Use Redis to hold a shared, distributed lock named ``name``.
        Returns True once the lock is acquired.

        If ``blocking`` is False, always return immediately. If the lock
        was acquired, return True, otherwise return False.

        ``blocking_timeout`` specifies the maximum number of seconds to
        wait trying to acquire the lock.

        ``token`` specifies the token value to be used. If provided, token
        must be a bytes object or a string that can be encoded to a bytes
        object with the default encoding. If a token isn't specified, a UUID
        will be generated.
        """
        sleep = self.sleep
        if token is None:
            token = uuid.uuid1().hex.encode()
        else:
            try:
                encoder = self.redis.connection_pool.get_encoder()
            except AttributeError:
                # Cluster
                encoder = self.redis.get_encoder()
            token = encoder.encode(token)
        if blocking is None:
            blocking = self.blocking
        if blocking_timeout is None:
            blocking_timeout = self.blocking_timeout
        stop_trying_at = None
        if blocking_timeout is not None:
            # Deadlines use the event loop's clock, consistent with the
            # next_try_at computation below.
            stop_trying_at = asyncio.get_running_loop().time() + blocking_timeout
        while True:
            if await self.do_acquire(token):
                self.local.token = token
                return True
            if not blocking:
                return False
            # Give up early if the *next* attempt would start past the
            # deadline, rather than sleeping and then failing.
            next_try_at = asyncio.get_running_loop().time() + sleep
            if stop_trying_at is not None and next_try_at > stop_trying_at:
                return False
            await asyncio.sleep(sleep)

    async def do_acquire(self, token: Union[str, bytes]) -> bool:
        if self.timeout:
            # convert to milliseconds
            timeout = int(self.timeout * 1000)
        else:
            timeout = None
        # SET NX PX: atomically set the key only if absent, with the TTL —
        # this single command is the acquire operation.
        if await self.redis.set(self.name, token, nx=True, px=timeout):
            return True
        return False

    async def locked(self) -> bool:
        """
        Returns True if this key is locked by any process, otherwise False.
        """
        return await self.redis.get(self.name) is not None

    async def owned(self) -> bool:
        """
        Returns True if this key is locked by this lock, otherwise False.
        """
        stored_token = await self.redis.get(self.name)
        # need to always compare bytes to bytes
        # TODO: this can be simplified when the context manager is finished
        if stored_token and not isinstance(stored_token, bytes):
            try:
                encoder = self.redis.connection_pool.get_encoder()
            except AttributeError:
                # Cluster
                encoder = self.redis.get_encoder()
            stored_token = encoder.encode(stored_token)
        return self.local.token is not None and stored_token == self.local.token

    async def release(self) -> None:
        """Releases the already acquired lock.

        The token is only cleared after the Redis release operation completes
        successfully. This ensures that if the release is cancelled mid-operation,
        the lock state remains consistent and can be retried.
        """
        expected_token = self.local.token
        if expected_token is None:
            raise LockError(
                "Cannot release a lock that's not owned or is already unlocked.",
                lock_name=self.name,
            )
        try:
            await self.do_release(expected_token)
        except LockNotOwnedError:
            # Lock doesn't exist in Redis, safe to clear token
            self.local.token = None
            raise
        # Only clear token after successful release
        self.local.token = None

    async def do_release(self, expected_token: bytes) -> None:
        # The Lua script deletes the key only if it still holds our token,
        # so we never release a lock that was taken over after expiry.
        if not bool(
            await self.lua_release(
                keys=[self.name], args=[expected_token], client=self.redis
            )
        ):
            raise LockNotOwnedError("Cannot release a lock that's no longer owned")

    def extend(
        self, additional_time: Number, replace_ttl: bool = False
    ) -> Awaitable[bool]:
        """
        Adds more time to an already acquired lock.

        ``additional_time`` can be specified as an integer or a float, both
        representing the number of seconds to add.

        ``replace_ttl`` if False (the default), add `additional_time` to
        the lock's existing ttl. If True, replace the lock's ttl with
        `additional_time`.
        """
        # Validation happens synchronously; the returned awaitable performs
        # the actual Redis call.
        if self.local.token is None:
            raise LockError("Cannot extend an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot extend a lock with no timeout")
        return self.do_extend(additional_time, replace_ttl)

    async def do_extend(self, additional_time, replace_ttl) -> bool:
        additional_time = int(additional_time * 1000)
        if not bool(
            await self.lua_extend(
                keys=[self.name],
                args=[self.local.token, additional_time, replace_ttl and "1" or "0"],
                client=self.redis,
            )
        ):
            raise LockNotOwnedError("Cannot extend a lock that's no longer owned")
        return True

    def reacquire(self) -> Awaitable[bool]:
        """
        Resets a TTL of an already acquired lock back to a timeout value.
        """
        # Validation happens synchronously; the returned awaitable performs
        # the actual Redis call.
        if self.local.token is None:
            raise LockError("Cannot reacquire an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot reacquire a lock with no timeout")
        return self.do_reacquire()

    async def do_reacquire(self) -> bool:
        timeout = int(self.timeout * 1000)
        if not bool(
            await self.lua_reacquire(
                keys=[self.name], args=[self.local.token, timeout], client=self.redis
            )
        ):
            raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned")
        return True

+ 0 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__init__.py


binární
usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__pycache__/__init__.cpython-310.pyc


+ 0 - 0
usr/local/lib/python3.10/dist-packages/redis/asyncio/multidb/__pycache__/client.cpython-310.pyc


Některé soubory nejsou zobrazeny, neboť je v těchto rozdílových datech změněno mnoho souborů