Removed IPC; added watchdog
		
							
								
								
									
										
images/pic1.png (BIN, binary file not shown): 409 KiB before, 182 KiB after
images/pic2.png (BIN, binary file not shown): 363 KiB before, 192 KiB after
src/app.py (17 changed lines)
@@ -6,7 +6,6 @@ import os
 
 # Application imports
 from utils.debugging import debug_signal_handler
-from utils.ipc_server import IPCServer
 from core.window import Window
 
 
@@ -15,26 +14,12 @@ class AppLaunchException(Exception):
     ...
 
 
-class Application(IPCServer):
+class Application:
     """ docstring for Application. """
 
     def __init__(self, args, unknownargs):
         super(Application, self).__init__()
 
-        if not settings_manager.is_trace_debug():
-            try:
-                self.create_ipc_listener()
-            except Exception:
-                ...
-
-            if not self.is_ipc_alive:
-                for arg in unknownargs + [args.new_tab,]:
-                    if os.path.isfile(arg):
-                        message = f"FILE|{arg}"
-                        self.send_ipc_message(message)
-
-                raise AppLaunchException(f"{app_name} IPC Server Exists: Will send path(s) to it and close...")
-
         try:
             # kill -SIGUSR2 <pid> from Linux/Unix or SIGBREAK signal from Windows
             signal.signal(
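For context, the block that survives this change registers a signal-based debug hook. A rough standalone sketch of that pattern follows; the handler body is a stand-in assumption, since the project's real handler lives in utils.debugging:

    import os
    import signal
    import sys
    import time

    def debug_signal_handler(signum, frame):
        # Stand-in for utils.debugging.debug_signal_handler: drop into pdb.
        import pdb
        pdb.set_trace()

    if __name__ == "__main__":
        # SIGUSR2 on Linux/Unix; SIGBREAK only exists on Windows.
        debug_sig = signal.SIGBREAK if sys.platform.startswith("win") else signal.SIGUSR2
        signal.signal(debug_sig, debug_signal_handler)
        print(f"PID {os.getpid()} waiting for {debug_sig!r}")
        while True:
            time.sleep(1)   # keep the process alive so the signal can be delivered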
@@ -6,6 +6,7 @@
 from .mixins.processor_mixin import ProcessorMixin
 from .controller_data import ControllerData
 from .widgets.desktop_files import DdesktopFiles
+from .widgets.dir_watcher import DirWatcher
 from .widgets.menu import Menu
 
 
@@ -32,4 +33,5 @@ class Controller(ProcessorMixin, ControllerData):
 
     def _load_widgets(self, args, unknownargs):
         DdesktopFiles()
+        DirWatcher()
         Menu(args, unknownargs)
							
								
								
									
src/core/widgets/dir_watcher.py (new file, 58 lines)
							| @@ -0,0 +1,58 @@ | |||||||
|  | # Python imports | ||||||
|  |  | ||||||
|  | # Lib imports | ||||||
|  | from libs.watchdog.observers import Observer | ||||||
|  | from libs.watchdog.events import FileSystemEventHandler | ||||||
|  |  | ||||||
|  | # Application imports | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ShellmenFSWatcher(FileSystemEventHandler): | ||||||
|  |     """docstring for ShellmenFSWatcher.""" | ||||||
|  |  | ||||||
|  |     def __init__(self): | ||||||
|  |         super(ShellmenFSWatcher, self).__init__() | ||||||
|  |  | ||||||
|  |  | ||||||
|  |     def on_any_event(self, event): | ||||||
 | 	 |         if event.event_type not in ("opened", "closed"): | ||||||
|  |             event_system.emit("reload_desktop_entries") | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirWatcher: | ||||||
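 | 	 |     """Watches the configured application directories and triggers a | ||||||
 | 	 |     desktop-entries reload whenever their contents change.""" | ||||||
 | 	 |  | ||||||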
|  |     def __init__(self): | ||||||
|  |  | ||||||
|  |         self.application_dirs = settings.config.application_dirs | ||||||
|  |  | ||||||
|  |         self._setup_styling() | ||||||
|  |         self._setup_signals() | ||||||
|  |         self._subscribe_to_events() | ||||||
|  |         self._load_widgets() | ||||||
|  |  | ||||||
|  |     def _setup_styling(self): | ||||||
|  |         ... | ||||||
|  |  | ||||||
|  |     def _setup_signals(self): | ||||||
|  |         ... | ||||||
|  |  | ||||||
|  |     def _subscribe_to_events(self): | ||||||
|  |         ... | ||||||
|  |  | ||||||
|  |     def _load_widgets(self): | ||||||
|  |         for path in self.application_dirs: | ||||||
|  |             self.create_watcher(path) | ||||||
|  |  | ||||||
|  |     def create_watcher(self, path): | ||||||
|  |         event_handler = ShellmenFSWatcher() | ||||||
|  |         observer      = Observer() | ||||||
|  |  | ||||||
 | 	 |         observer.schedule(event_handler, path, recursive=False) | ||||||
 | 	 |         observer.start() | ||||||
 | 	 |         # The observer runs in its own background thread; it is not blocked on | ||||||
 | 	 |         # or joined here, so the watch stays active for the life of the application. | ||||||
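Side note: a minimal, runnable sketch of the observer/handler pattern DirWatcher uses, with a plain callback standing in for the application's global event_system and settings objects; the watched path is an illustrative assumption (outside this tree the same classes ship in the upstream watchdog package):

    import time

    from libs.watchdog.observers import Observer
    from libs.watchdog.events import FileSystemEventHandler

    class ReloadHandler(FileSystemEventHandler):
        """Calls a plain callback for every change that is not a pure open/close."""

        def __init__(self, on_change):
            super().__init__()
            self.on_change = on_change

        def on_any_event(self, event):
            if event.event_type not in ("opened", "closed"):
                self.on_change(event)

    if __name__ == "__main__":
        observer = Observer()
        handler  = ReloadHandler(lambda e: print("reload_desktop_entries:", e.src_path))
        # Hypothetical path; DirWatcher iterates settings.config.application_dirs instead.
        observer.schedule(handler, "/usr/share/applications", recursive=False)
        observer.start()
        try:
            time.sleep(30)      # watch for a while...
        finally:
            observer.stop()     # ...then shut the emitter thread down cleanly
            observer.join()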
							
								
								
									
src/libs/watchdog/__init__.py (new file, 14 lines)
							| @@ -0,0 +1,14 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
							
								
								
									
src/libs/watchdog/events.py (new executable file, 599 lines)
							| @@ -0,0 +1,599 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.events | ||||||
|  | :synopsis: File system events and event handlers. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Event Classes | ||||||
|  | ------------- | ||||||
|  | .. autoclass:: FileSystemEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |    :inherited-members: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileSystemMovedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileMovedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: DirMovedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileModifiedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: DirModifiedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileCreatedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileClosedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileOpenedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: DirCreatedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: FileDeletedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: DirDeletedEvent | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  |  | ||||||
|  | Event Handler Classes | ||||||
|  | --------------------- | ||||||
|  | .. autoclass:: FileSystemEventHandler | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: PatternMatchingEventHandler | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: RegexMatchingEventHandler | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: LoggingEventHandler | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import os.path | ||||||
|  | import re | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils.patterns import match_any_paths | ||||||
|  |  | ||||||
|  | EVENT_TYPE_MOVED = "moved" | ||||||
|  | EVENT_TYPE_DELETED = "deleted" | ||||||
|  | EVENT_TYPE_CREATED = "created" | ||||||
|  | EVENT_TYPE_MODIFIED = "modified" | ||||||
|  | EVENT_TYPE_CLOSED = "closed" | ||||||
|  | EVENT_TYPE_OPENED = "opened" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileSystemEvent: | ||||||
|  |     """ | ||||||
|  |     Immutable type that represents a file system event that is triggered | ||||||
|  |     when a change occurs on the monitored file system. | ||||||
|  |  | ||||||
|  |     All FileSystemEvent objects are required to be immutable and hence | ||||||
|  |     can be used as keys in dictionaries or be added to sets. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     event_type = "" | ||||||
|  |     """The type of the event as a string.""" | ||||||
|  |  | ||||||
|  |     is_directory = False | ||||||
|  |     """True if event was emitted for a directory; False otherwise.""" | ||||||
|  |  | ||||||
|  |     is_synthetic = False | ||||||
|  |     """ | ||||||
|  |     True if event was synthesized; False otherwise. | ||||||
|  |  | ||||||
|  |     These are events that weren't actually broadcast by the OS, but | ||||||
|  |     are presumed to have happened based on other, actual events. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, src_path): | ||||||
|  |         self._src_path = src_path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def src_path(self): | ||||||
|  |         """Source path of the file system object that triggered this event.""" | ||||||
|  |         return self._src_path | ||||||
|  |  | ||||||
|  |     def __str__(self): | ||||||
|  |         return self.__repr__() | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return ( | ||||||
|  |             f"<{type(self).__name__}: event_type={self.event_type}, " | ||||||
|  |             f"src_path={self.src_path!r}, is_directory={self.is_directory}>" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     # Used for comparison of events. | ||||||
|  |     @property | ||||||
|  |     def key(self): | ||||||
|  |         return (self.event_type, self.src_path, self.is_directory) | ||||||
|  |  | ||||||
|  |     def __eq__(self, event): | ||||||
|  |         return self.key == event.key | ||||||
|  |  | ||||||
|  |     def __ne__(self, event): | ||||||
|  |         return self.key != event.key | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return hash(self.key) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileSystemMovedEvent(FileSystemEvent): | ||||||
|  |     """ | ||||||
|  |     File system event representing any kind of file system movement. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_MOVED | ||||||
|  |  | ||||||
|  |     def __init__(self, src_path, dest_path): | ||||||
|  |         super().__init__(src_path) | ||||||
|  |         self._dest_path = dest_path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def dest_path(self): | ||||||
|  |         """The destination path of the move event.""" | ||||||
|  |         return self._dest_path | ||||||
|  |  | ||||||
|  |     # Used for hashing this as an immutable object. | ||||||
|  |     @property | ||||||
|  |     def key(self): | ||||||
|  |         return (self.event_type, self.src_path, self.dest_path, self.is_directory) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return ( | ||||||
|  |             f"<{type(self).__name__}: src_path={self.src_path!r}, " | ||||||
|  |             f"dest_path={self.dest_path!r}, is_directory={self.is_directory}>" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # File events. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileDeletedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing file deletion on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_DELETED | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileModifiedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing file modification on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_MODIFIED | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileCreatedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing file creation on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_CREATED | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileMovedEvent(FileSystemMovedEvent): | ||||||
|  |     """File system event representing file movement on the file system.""" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileClosedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing file close on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_CLOSED | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileOpenedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing file close on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_OPENED | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Directory events. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirDeletedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing directory deletion on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_DELETED | ||||||
|  |     is_directory = True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirModifiedEvent(FileSystemEvent): | ||||||
|  |     """ | ||||||
|  |     File system event representing directory modification on the file system. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_MODIFIED | ||||||
|  |     is_directory = True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirCreatedEvent(FileSystemEvent): | ||||||
|  |     """File system event representing directory creation on the file system.""" | ||||||
|  |  | ||||||
|  |     event_type = EVENT_TYPE_CREATED | ||||||
|  |     is_directory = True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirMovedEvent(FileSystemMovedEvent): | ||||||
|  |     """File system event representing directory movement on the file system.""" | ||||||
|  |  | ||||||
|  |     is_directory = True | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FileSystemEventHandler: | ||||||
|  |     """ | ||||||
|  |     Base file system event handler that you can override methods from. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def dispatch(self, event): | ||||||
|  |         """Dispatches events to the appropriate methods. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             The event object representing the file system event. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileSystemEvent` | ||||||
|  |         """ | ||||||
|  |         self.on_any_event(event) | ||||||
|  |         { | ||||||
|  |             EVENT_TYPE_CREATED: self.on_created, | ||||||
|  |             EVENT_TYPE_DELETED: self.on_deleted, | ||||||
|  |             EVENT_TYPE_MODIFIED: self.on_modified, | ||||||
|  |             EVENT_TYPE_MOVED: self.on_moved, | ||||||
|  |             EVENT_TYPE_CLOSED: self.on_closed, | ||||||
|  |             EVENT_TYPE_OPENED: self.on_opened, | ||||||
|  |         }[event.event_type](event) | ||||||
|  |  | ||||||
|  |     def on_any_event(self, event): | ||||||
|  |         """Catch-all event handler. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             The event object representing the file system event. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileSystemEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_moved(self, event): | ||||||
|  |         """Called when a file or a directory is moved or renamed. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file/directory movement. | ||||||
|  |         :type event: | ||||||
|  |             :class:`DirMovedEvent` or :class:`FileMovedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_created(self, event): | ||||||
|  |         """Called when a file or directory is created. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file/directory creation. | ||||||
|  |         :type event: | ||||||
|  |             :class:`DirCreatedEvent` or :class:`FileCreatedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_deleted(self, event): | ||||||
|  |         """Called when a file or directory is deleted. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file/directory deletion. | ||||||
|  |         :type event: | ||||||
|  |             :class:`DirDeletedEvent` or :class:`FileDeletedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_modified(self, event): | ||||||
|  |         """Called when a file or directory is modified. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file/directory modification. | ||||||
|  |         :type event: | ||||||
|  |             :class:`DirModifiedEvent` or :class:`FileModifiedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_closed(self, event): | ||||||
|  |         """Called when a file opened for writing is closed. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file closing. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileClosedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def on_opened(self, event): | ||||||
|  |         """Called when a file is opened. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event representing file opening. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileOpenedEvent` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class PatternMatchingEventHandler(FileSystemEventHandler): | ||||||
|  |     """ | ||||||
|  |     Matches given patterns with file paths associated with occurring events. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         patterns=None, | ||||||
|  |         ignore_patterns=None, | ||||||
|  |         ignore_directories=False, | ||||||
|  |         case_sensitive=False, | ||||||
|  |     ): | ||||||
|  |         super().__init__() | ||||||
|  |  | ||||||
|  |         self._patterns = patterns | ||||||
|  |         self._ignore_patterns = ignore_patterns | ||||||
|  |         self._ignore_directories = ignore_directories | ||||||
|  |         self._case_sensitive = case_sensitive | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def patterns(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         Patterns to allow matching event paths. | ||||||
|  |         """ | ||||||
|  |         return self._patterns | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def ignore_patterns(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         Patterns to ignore matching event paths. | ||||||
|  |         """ | ||||||
|  |         return self._ignore_patterns | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def ignore_directories(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         ``True`` if directories should be ignored; ``False`` otherwise. | ||||||
|  |         """ | ||||||
|  |         return self._ignore_directories | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def case_sensitive(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         ``True`` if path names should be matched sensitive to case; ``False`` | ||||||
|  |         otherwise. | ||||||
|  |         """ | ||||||
|  |         return self._case_sensitive | ||||||
|  |  | ||||||
|  |     def dispatch(self, event): | ||||||
|  |         """Dispatches events to the appropriate methods. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             The event object representing the file system event. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileSystemEvent` | ||||||
|  |         """ | ||||||
|  |         if self.ignore_directories and event.is_directory: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         paths = [] | ||||||
|  |         if hasattr(event, "dest_path"): | ||||||
|  |             paths.append(os.fsdecode(event.dest_path)) | ||||||
|  |         if event.src_path: | ||||||
|  |             paths.append(os.fsdecode(event.src_path)) | ||||||
|  |  | ||||||
|  |         if match_any_paths( | ||||||
|  |             paths, | ||||||
|  |             included_patterns=self.patterns, | ||||||
|  |             excluded_patterns=self.ignore_patterns, | ||||||
|  |             case_sensitive=self.case_sensitive, | ||||||
|  |         ): | ||||||
|  |             super().dispatch(event) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class RegexMatchingEventHandler(FileSystemEventHandler): | ||||||
|  |     """ | ||||||
|  |     Matches given regexes with file paths associated with occurring events. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         regexes=None, | ||||||
|  |         ignore_regexes=None, | ||||||
|  |         ignore_directories=False, | ||||||
|  |         case_sensitive=False, | ||||||
|  |     ): | ||||||
|  |         super().__init__() | ||||||
|  |  | ||||||
|  |         if regexes is None: | ||||||
|  |             regexes = [r".*"] | ||||||
|  |         elif isinstance(regexes, str): | ||||||
|  |             regexes = [regexes] | ||||||
|  |         if ignore_regexes is None: | ||||||
|  |             ignore_regexes = [] | ||||||
|  |         if case_sensitive: | ||||||
|  |             self._regexes = [re.compile(r) for r in regexes] | ||||||
|  |             self._ignore_regexes = [re.compile(r) for r in ignore_regexes] | ||||||
|  |         else: | ||||||
|  |             self._regexes = [re.compile(r, re.I) for r in regexes] | ||||||
|  |             self._ignore_regexes = [re.compile(r, re.I) for r in ignore_regexes] | ||||||
|  |         self._ignore_directories = ignore_directories | ||||||
|  |         self._case_sensitive = case_sensitive | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def regexes(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         Regexes to allow matching event paths. | ||||||
|  |         """ | ||||||
|  |         return self._regexes | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def ignore_regexes(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         Regexes to ignore matching event paths. | ||||||
|  |         """ | ||||||
|  |         return self._ignore_regexes | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def ignore_directories(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         ``True`` if directories should be ignored; ``False`` otherwise. | ||||||
|  |         """ | ||||||
|  |         return self._ignore_directories | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def case_sensitive(self): | ||||||
|  |         """ | ||||||
|  |         (Read-only) | ||||||
|  |         ``True`` if path names should be matched sensitive to case; ``False`` | ||||||
|  |         otherwise. | ||||||
|  |         """ | ||||||
|  |         return self._case_sensitive | ||||||
|  |  | ||||||
|  |     def dispatch(self, event): | ||||||
|  |         """Dispatches events to the appropriate methods. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             The event object representing the file system event. | ||||||
|  |         :type event: | ||||||
|  |             :class:`FileSystemEvent` | ||||||
|  |         """ | ||||||
|  |         if self.ignore_directories and event.is_directory: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         paths = [] | ||||||
|  |         if hasattr(event, "dest_path"): | ||||||
|  |             paths.append(os.fsdecode(event.dest_path)) | ||||||
|  |         if event.src_path: | ||||||
|  |             paths.append(os.fsdecode(event.src_path)) | ||||||
|  |  | ||||||
|  |         if any(r.match(p) for r in self.ignore_regexes for p in paths): | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         if any(r.match(p) for r in self.regexes for p in paths): | ||||||
|  |             super().dispatch(event) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class LoggingEventHandler(FileSystemEventHandler): | ||||||
|  |     """Logs all the events captured.""" | ||||||
|  |  | ||||||
|  |     def __init__(self, logger=None): | ||||||
|  |         super().__init__() | ||||||
|  |  | ||||||
|  |         self.logger = logger or logging.root | ||||||
|  |  | ||||||
|  |     def on_moved(self, event): | ||||||
|  |         super().on_moved(event) | ||||||
|  |  | ||||||
|  |         what = "directory" if event.is_directory else "file" | ||||||
|  |         self.logger.info( | ||||||
|  |             "Moved %s: from %s to %s", what, event.src_path, event.dest_path | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     def on_created(self, event): | ||||||
|  |         super().on_created(event) | ||||||
|  |  | ||||||
|  |         what = "directory" if event.is_directory else "file" | ||||||
|  |         self.logger.info("Created %s: %s", what, event.src_path) | ||||||
|  |  | ||||||
|  |     def on_deleted(self, event): | ||||||
|  |         super().on_deleted(event) | ||||||
|  |  | ||||||
|  |         what = "directory" if event.is_directory else "file" | ||||||
|  |         self.logger.info("Deleted %s: %s", what, event.src_path) | ||||||
|  |  | ||||||
|  |     def on_modified(self, event): | ||||||
|  |         super().on_modified(event) | ||||||
|  |  | ||||||
|  |         what = "directory" if event.is_directory else "file" | ||||||
|  |         self.logger.info("Modified %s: %s", what, event.src_path) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def generate_sub_moved_events(src_dir_path, dest_dir_path): | ||||||
|  |     """Generates an event list of :class:`DirMovedEvent` and | ||||||
|  |     :class:`FileMovedEvent` objects for all the files and directories within | ||||||
|  |     the given moved directory that were moved along with the directory. | ||||||
|  |  | ||||||
|  |     :param src_dir_path: | ||||||
|  |         The source path of the moved directory. | ||||||
|  |     :param dest_dir_path: | ||||||
|  |         The destination path of the moved directory. | ||||||
|  |     :returns: | ||||||
|  |         An iterable of file system events of type :class:`DirMovedEvent` and | ||||||
|  |         :class:`FileMovedEvent`. | ||||||
|  |     """ | ||||||
|  |     for root, directories, filenames in os.walk(dest_dir_path): | ||||||
|  |         for directory in directories: | ||||||
|  |             full_path = os.path.join(root, directory) | ||||||
|  |             renamed_path = ( | ||||||
|  |                 full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None | ||||||
|  |             ) | ||||||
|  |             dir_moved_event = DirMovedEvent(renamed_path, full_path) | ||||||
|  |             dir_moved_event.is_synthetic = True | ||||||
|  |             yield dir_moved_event | ||||||
|  |         for filename in filenames: | ||||||
|  |             full_path = os.path.join(root, filename) | ||||||
|  |             renamed_path = ( | ||||||
|  |                 full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None | ||||||
|  |             ) | ||||||
|  |             file_moved_event = FileMovedEvent(renamed_path, full_path) | ||||||
|  |             file_moved_event.is_synthetic = True | ||||||
|  |             yield file_moved_event | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def generate_sub_created_events(src_dir_path): | ||||||
|  |     """Generates an event list of :class:`DirCreatedEvent` and | ||||||
|  |     :class:`FileCreatedEvent` objects for all the files and directories within | ||||||
|  |     the given moved directory that were moved along with the directory. | ||||||
|  |  | ||||||
|  |     :param src_dir_path: | ||||||
|  |         The source path of the created directory. | ||||||
|  |     :returns: | ||||||
|  |         An iterable of file system events of type :class:`DirCreatedEvent` and | ||||||
|  |         :class:`FileCreatedEvent`. | ||||||
|  |     """ | ||||||
|  |     for root, directories, filenames in os.walk(src_dir_path): | ||||||
|  |         for directory in directories: | ||||||
|  |             dir_created_event = DirCreatedEvent(os.path.join(root, directory)) | ||||||
|  |             dir_created_event.is_synthetic = True | ||||||
|  |             yield dir_created_event | ||||||
|  |         for filename in filenames: | ||||||
|  |             file_created_event = FileCreatedEvent(os.path.join(root, filename)) | ||||||
|  |             file_created_event.is_synthetic = True | ||||||
|  |             yield file_created_event | ||||||
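A quick usage sketch of the dispatch machinery defined above: events can be pushed through a handler directly, without an Observer thread. The ".desktop" pattern and the paths are illustrative assumptions, not part of the commit:

    from libs.watchdog.events import FileCreatedEvent, PatternMatchingEventHandler

    class DesktopEntryHandler(PatternMatchingEventHandler):
        """Reacts only to .desktop files; directories and other files are filtered out."""

        def __init__(self):
            super().__init__(patterns=["*.desktop"], ignore_directories=True)

        def on_created(self, event):
            print("new desktop entry:", event.src_path)

    handler = DesktopEntryHandler()
    handler.dispatch(FileCreatedEvent("/usr/share/applications/firefox.desktop"))  # handled
    handler.dispatch(FileCreatedEvent("/tmp/notes.txt"))                           # filtered out by pattern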
							
								
								
									
src/libs/watchdog/observers/__init__.py (new file, 97 lines)
							| @@ -0,0 +1,97 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers | ||||||
|  | :synopsis: Observer that picks a native implementation if available. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ======= | ||||||
|  | .. autoclass:: Observer | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |    :inherited-members: | ||||||
|  |  | ||||||
|  | Observer thread that schedules watching directories and dispatches | ||||||
|  | calls to event handlers. | ||||||
|  |  | ||||||
 | 	 | You can also import platform-specific classes directly and use them instead | ||||||
 | 	 | of :class:`Observer`. Here is a list of implemented observer classes: | ||||||
|  |  | ||||||
|  | ============== ================================ ============================== | ||||||
|  | Class          Platforms                        Note | ||||||
|  | ============== ================================ ============================== | ||||||
|  | |Inotify|      Linux 2.6.13+                    ``inotify(7)`` based observer | ||||||
|  | |FSEvents|     macOS                            FSEvents based observer | ||||||
|  | |Kqueue|       macOS and BSD with kqueue(2)     ``kqueue(2)`` based observer | ||||||
|  | |WinApi|       MS Windows                       Windows API-based observer | ||||||
|  | |Polling|      Any                              fallback implementation | ||||||
|  | ============== ================================ ============================== | ||||||
|  |  | ||||||
|  | .. |Inotify|     replace:: :class:`.inotify.InotifyObserver` | ||||||
|  | .. |FSEvents|    replace:: :class:`.fsevents.FSEventsObserver` | ||||||
|  | .. |Kqueue|      replace:: :class:`.kqueue.KqueueObserver` | ||||||
|  | .. |WinApi|      replace:: :class:`.read_directory_changes.WindowsApiObserver` | ||||||
|  | .. |Polling|     replace:: :class:`.polling.PollingObserver` | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import warnings | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils import UnsupportedLibc | ||||||
|  |  | ||||||
|  | from .api import BaseObserverSubclassCallable | ||||||
|  |  | ||||||
|  | Observer: BaseObserverSubclassCallable | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if sys.platform.startswith("linux"): | ||||||
|  |     try: | ||||||
|  |         from .inotify import InotifyObserver as Observer | ||||||
|  |     except UnsupportedLibc: | ||||||
|  |         from .polling import PollingObserver as Observer | ||||||
|  |  | ||||||
|  | elif sys.platform.startswith("darwin"): | ||||||
|  |     try: | ||||||
|  |         from .fsevents import FSEventsObserver as Observer | ||||||
|  |     except Exception: | ||||||
|  |         try: | ||||||
|  |             from .kqueue import KqueueObserver as Observer | ||||||
|  |             warnings.warn("Failed to import fsevents. Fall back to kqueue") | ||||||
|  |         except Exception: | ||||||
|  |             from .polling import PollingObserver as Observer | ||||||
|  |  | ||||||
|  |             warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.") | ||||||
|  |  | ||||||
|  | elif sys.platform in ("dragonfly", "freebsd", "netbsd", "openbsd", "bsd"): | ||||||
|  |     from .kqueue import KqueueObserver as Observer | ||||||
|  |  | ||||||
|  | elif sys.platform.startswith("win"): | ||||||
|  |     try: | ||||||
|  |         from .read_directory_changes import WindowsApiObserver as Observer | ||||||
|  |     except Exception: | ||||||
|  |         from .polling import PollingObserver as Observer | ||||||
|  |  | ||||||
|  |         warnings.warn("Failed to import read_directory_changes. Fall back to polling.") | ||||||
|  |  | ||||||
|  | else: | ||||||
|  |     from .polling import PollingObserver as Observer | ||||||
|  |  | ||||||
|  | __all__ = ["Observer"] | ||||||
							
								
								
									
src/libs/watchdog/observers/api.py (new file, 386 lines)
							| @@ -0,0 +1,386 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import queue | ||||||
|  | import threading | ||||||
|  | from pathlib import Path | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils import BaseThread, Protocol | ||||||
|  | from libs.watchdog.utils.bricks import SkipRepeatsQueue | ||||||
|  |  | ||||||
|  | DEFAULT_EMITTER_TIMEOUT = 1  # in seconds. | ||||||
|  | DEFAULT_OBSERVER_TIMEOUT = 1  # in seconds. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Collection classes | ||||||
|  | class EventQueue(SkipRepeatsQueue): | ||||||
|  |     """Thread-safe event queue based on a special queue that skips adding | ||||||
|  |     the same event (:class:`FileSystemEvent`) multiple times consecutively. | ||||||
 | 	 |     This avoids dispatching multiple event-handling calls when identical | ||||||
 | 	 |     events are produced faster than an observer can consume them. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ObservedWatch: | ||||||
|  |     """An scheduled watch. | ||||||
|  |  | ||||||
|  |     :param path: | ||||||
|  |         Path string. | ||||||
|  |     :param recursive: | ||||||
|  |         ``True`` if watch is recursive; ``False`` otherwise. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, path, recursive): | ||||||
|  |         if isinstance(path, Path): | ||||||
|  |             self._path = str(path) | ||||||
|  |         else: | ||||||
|  |             self._path = path | ||||||
|  |         self._is_recursive = recursive | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def path(self): | ||||||
|  |         """The path that this watch monitors.""" | ||||||
|  |         return self._path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_recursive(self): | ||||||
|  |         """Determines whether subdirectories are watched for the path.""" | ||||||
|  |         return self._is_recursive | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def key(self): | ||||||
|  |         return self.path, self.is_recursive | ||||||
|  |  | ||||||
|  |     def __eq__(self, watch): | ||||||
|  |         return self.key == watch.key | ||||||
|  |  | ||||||
|  |     def __ne__(self, watch): | ||||||
|  |         return self.key != watch.key | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return hash(self.key) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return f"<{type(self).__name__}: path={self.path!r}, is_recursive={self.is_recursive}>" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Observer classes | ||||||
|  | class EventEmitter(BaseThread): | ||||||
|  |     """ | ||||||
|  |     Producer thread base class subclassed by event emitters | ||||||
|  |     that generate events and populate a queue with them. | ||||||
|  |  | ||||||
|  |     :param event_queue: | ||||||
|  |         The event queue to populate with generated events. | ||||||
|  |     :type event_queue: | ||||||
|  |         :class:`libs.watchdog.events.EventQueue` | ||||||
|  |     :param watch: | ||||||
|  |         The watch to observe and produce events for. | ||||||
|  |     :type watch: | ||||||
|  |         :class:`ObservedWatch` | ||||||
|  |     :param timeout: | ||||||
|  |         Timeout (in seconds) between successive attempts at reading events. | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): | ||||||
|  |         super().__init__() | ||||||
|  |         self._event_queue = event_queue | ||||||
|  |         self._watch = watch | ||||||
|  |         self._timeout = timeout | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def timeout(self): | ||||||
|  |         """ | ||||||
|  |         Blocking timeout for reading events. | ||||||
|  |         """ | ||||||
|  |         return self._timeout | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def watch(self): | ||||||
|  |         """ | ||||||
|  |         The watch associated with this emitter. | ||||||
|  |         """ | ||||||
|  |         return self._watch | ||||||
|  |  | ||||||
|  |     def queue_event(self, event): | ||||||
|  |         """ | ||||||
|  |         Queues a single event. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             Event to be queued. | ||||||
|  |         :type event: | ||||||
|  |             An instance of :class:`libs.watchdog.events.FileSystemEvent` | ||||||
|  |             or a subclass. | ||||||
|  |         """ | ||||||
|  |         self._event_queue.put((event, self.watch)) | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout): | ||||||
|  |         """Override this method to populate the event queue with events | ||||||
|  |         per interval period. | ||||||
|  |  | ||||||
|  |         :param timeout: | ||||||
|  |             Timeout (in seconds) between successive attempts at | ||||||
|  |             reading events. | ||||||
|  |         :type timeout: | ||||||
|  |             ``float`` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         while self.should_keep_running(): | ||||||
|  |             self.queue_events(self.timeout) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class EventDispatcher(BaseThread): | ||||||
|  |     """ | ||||||
|  |     Consumer thread base class subclassed by event observer threads | ||||||
|  |     that dispatch events from an event queue to appropriate event handlers. | ||||||
|  |  | ||||||
|  |     :param timeout: | ||||||
|  |         Timeout value (in seconds) passed to emitters | ||||||
|  |         constructions in the child class BaseObserver. | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     _stop_event = object() | ||||||
|  |     """Event inserted into the queue to signal a requested stop.""" | ||||||
|  |  | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__() | ||||||
|  |         self._event_queue = EventQueue() | ||||||
|  |         self._timeout = timeout | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def timeout(self): | ||||||
|  |         """Timeout value to construct emitters with.""" | ||||||
|  |         return self._timeout | ||||||
|  |  | ||||||
|  |     def stop(self): | ||||||
|  |         BaseThread.stop(self) | ||||||
|  |         try: | ||||||
|  |             self.event_queue.put_nowait(EventDispatcher._stop_event) | ||||||
|  |         except queue.Full: | ||||||
|  |             pass | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def event_queue(self): | ||||||
|  |         """The event queue which is populated with file system events | ||||||
|  |         by emitters and from which events are dispatched by a dispatcher | ||||||
|  |         thread.""" | ||||||
|  |         return self._event_queue | ||||||
|  |  | ||||||
|  |     def dispatch_events(self, event_queue): | ||||||
|  |         """Override this method to consume events from an event queue, blocking | ||||||
|  |         on the queue for the specified timeout before raising :class:`queue.Empty`. | ||||||
|  |  | ||||||
|  |         :param event_queue: | ||||||
|  |             Event queue to populate with one set of events. | ||||||
|  |         :type event_queue: | ||||||
|  |             :class:`EventQueue` | ||||||
|  |         :raises: | ||||||
|  |             :class:`queue.Empty` | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         while self.should_keep_running(): | ||||||
|  |             try: | ||||||
|  |                 self.dispatch_events(self.event_queue) | ||||||
|  |             except queue.Empty: | ||||||
|  |                 continue | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class BaseObserver(EventDispatcher): | ||||||
|  |     """Base observer.""" | ||||||
|  |  | ||||||
|  |     def __init__(self, emitter_class, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(timeout) | ||||||
|  |         self._emitter_class = emitter_class | ||||||
|  |         self._lock = threading.RLock() | ||||||
|  |         self._watches = set() | ||||||
|  |         self._handlers = dict() | ||||||
|  |         self._emitters = set() | ||||||
|  |         self._emitter_for_watch = dict() | ||||||
|  |  | ||||||
|  |     def _add_emitter(self, emitter): | ||||||
|  |         self._emitter_for_watch[emitter.watch] = emitter | ||||||
|  |         self._emitters.add(emitter) | ||||||
|  |  | ||||||
|  |     def _remove_emitter(self, emitter): | ||||||
|  |         del self._emitter_for_watch[emitter.watch] | ||||||
|  |         self._emitters.remove(emitter) | ||||||
|  |         emitter.stop() | ||||||
|  |         try: | ||||||
|  |             emitter.join() | ||||||
|  |         except RuntimeError: | ||||||
|  |             pass | ||||||
|  |  | ||||||
|  |     def _clear_emitters(self): | ||||||
|  |         for emitter in self._emitters: | ||||||
|  |             emitter.stop() | ||||||
|  |         for emitter in self._emitters: | ||||||
|  |             try: | ||||||
|  |                 emitter.join() | ||||||
|  |             except RuntimeError: | ||||||
|  |                 pass | ||||||
|  |         self._emitters.clear() | ||||||
|  |         self._emitter_for_watch.clear() | ||||||
|  |  | ||||||
|  |     def _add_handler_for_watch(self, event_handler, watch): | ||||||
|  |         if watch not in self._handlers: | ||||||
|  |             self._handlers[watch] = set() | ||||||
|  |         self._handlers[watch].add(event_handler) | ||||||
|  |  | ||||||
|  |     def _remove_handlers_for_watch(self, watch): | ||||||
|  |         del self._handlers[watch] | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def emitters(self): | ||||||
|  |         """Returns event emitter created by this observer.""" | ||||||
|  |         return self._emitters | ||||||
|  |  | ||||||
|  |     def start(self): | ||||||
|  |         for emitter in self._emitters.copy(): | ||||||
|  |             try: | ||||||
|  |                 emitter.start() | ||||||
|  |             except Exception: | ||||||
|  |                 self._remove_emitter(emitter) | ||||||
|  |                 raise | ||||||
|  |         super().start() | ||||||
|  |  | ||||||
|  |     def schedule(self, event_handler, path, recursive=False): | ||||||
|  |         """ | ||||||
|  |         Schedules watching a path and calls appropriate methods specified | ||||||
|  |         in the given event handler in response to file system events. | ||||||
|  |  | ||||||
|  |         :param event_handler: | ||||||
|  |             An event handler instance that has appropriate event handling | ||||||
|  |             methods which will be called by the observer in response to | ||||||
|  |             file system events. | ||||||
|  |         :type event_handler: | ||||||
|  |             :class:`libs.watchdog.events.FileSystemEventHandler` or a subclass | ||||||
|  |         :param path: | ||||||
|  |             Directory path that will be monitored. | ||||||
|  |         :type path: | ||||||
|  |             ``str`` | ||||||
|  |         :param recursive: | ||||||
|  |             ``True`` if events will be emitted for sub-directories | ||||||
|  |             traversed recursively; ``False`` otherwise. | ||||||
|  |         :type recursive: | ||||||
|  |             ``bool`` | ||||||
|  |         :return: | ||||||
|  |             An :class:`ObservedWatch` object instance representing | ||||||
|  |             a watch. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             watch = ObservedWatch(path, recursive) | ||||||
|  |             self._add_handler_for_watch(event_handler, watch) | ||||||
|  |  | ||||||
|  |             # If we don't have an emitter for this watch already, create it. | ||||||
|  |             if self._emitter_for_watch.get(watch) is None: | ||||||
|  |                 emitter = self._emitter_class( | ||||||
|  |                     event_queue=self.event_queue, watch=watch, timeout=self.timeout | ||||||
|  |                 ) | ||||||
|  |                 if self.is_alive(): | ||||||
|  |                     emitter.start() | ||||||
|  |                 self._add_emitter(emitter) | ||||||
|  |             self._watches.add(watch) | ||||||
|  |         return watch | ||||||
|  |  | ||||||
|  |     def add_handler_for_watch(self, event_handler, watch): | ||||||
|  |         """Adds a handler for the given watch. | ||||||
|  |  | ||||||
|  |         :param event_handler: | ||||||
|  |             An event handler instance that has appropriate event handling | ||||||
|  |             methods which will be called by the observer in response to | ||||||
|  |             file system events. | ||||||
|  |         :type event_handler: | ||||||
|  |             :class:`libs.watchdog.events.FileSystemEventHandler` or a subclass | ||||||
|  |         :param watch: | ||||||
|  |             The watch to add a handler for. | ||||||
|  |         :type watch: | ||||||
|  |             An instance of :class:`ObservedWatch` or a subclass of | ||||||
|  |             :class:`ObservedWatch` | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             self._add_handler_for_watch(event_handler, watch) | ||||||
|  |  | ||||||
|  |     def remove_handler_for_watch(self, event_handler, watch): | ||||||
|  |         """Removes a handler for the given watch. | ||||||
|  |  | ||||||
|  |         :param event_handler: | ||||||
|  |             An event handler instance that has appropriate event handling | ||||||
|  |             methods which will be called by the observer in response to | ||||||
|  |             file system events. | ||||||
|  |         :type event_handler: | ||||||
|  |             :class:`libs.watchdog.events.FileSystemEventHandler` or a subclass | ||||||
|  |         :param watch: | ||||||
|  |             The watch to remove a handler for. | ||||||
|  |         :type watch: | ||||||
|  |             An instance of :class:`ObservedWatch` or a subclass of | ||||||
|  |             :class:`ObservedWatch` | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             self._handlers[watch].remove(event_handler) | ||||||
|  |  | ||||||
|  |     def unschedule(self, watch): | ||||||
|  |         """Unschedules a watch. | ||||||
|  |  | ||||||
|  |         :param watch: | ||||||
|  |             The watch to unschedule. | ||||||
|  |         :type watch: | ||||||
|  |             An instance of :class:`ObservedWatch` or a subclass of | ||||||
|  |             :class:`ObservedWatch` | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             emitter = self._emitter_for_watch[watch] | ||||||
|  |             del self._handlers[watch] | ||||||
|  |             self._remove_emitter(emitter) | ||||||
|  |             self._watches.remove(watch) | ||||||
|  |  | ||||||
|  |     def unschedule_all(self): | ||||||
|  |         """Unschedules all watches and detaches all associated event | ||||||
|  |         handlers.""" | ||||||
|  |         with self._lock: | ||||||
|  |             self._handlers.clear() | ||||||
|  |             self._clear_emitters() | ||||||
|  |             self._watches.clear() | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         self.unschedule_all() | ||||||
|  |  | ||||||
|  |     def dispatch_events(self, event_queue): | ||||||
|  |         entry = event_queue.get(block=True) | ||||||
|  |         if entry is EventDispatcher._stop_event: | ||||||
|  |             return | ||||||
|  |         event, watch = entry | ||||||
|  |  | ||||||
|  |         with self._lock: | ||||||
|  |             # To allow unschedule/stop and safe removal of event handlers | ||||||
|  |             # within event handlers itself, check if the handler is still | ||||||
|  |             # registered after every dispatch. | ||||||
|  |             for handler in list(self._handlers.get(watch, [])): | ||||||
|  |                 if handler in self._handlers.get(watch, []): | ||||||
|  |                     handler.dispatch(event) | ||||||
|  |         event_queue.task_done() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class BaseObserverSubclassCallable(Protocol): | ||||||
|  |     def __call__(self, timeout: float = ...) -> BaseObserver: | ||||||
|  |         ... | ||||||
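To make the BaseObserver scheduling API above concrete, a short hypothetical usage sketch (the path and handlers are assumptions for illustration):

    from libs.watchdog.events import FileSystemEventHandler
    from libs.watchdog.observers import Observer

    class PrintHandler(FileSystemEventHandler):
        def __init__(self, tag):
            super().__init__()
            self.tag = tag

        def on_any_event(self, event):
            print(self.tag, event)

    observer = Observer()
    # schedule() returns the ObservedWatch; a second handler can share its emitter.
    watch = observer.schedule(PrintHandler("first"), "/tmp", recursive=False)
    observer.add_handler_for_watch(PrintHandler("second"), watch)
    observer.start()

    # ... later, tear down just this watch, then the whole observer:
    observer.unschedule(watch)   # stops and joins that watch's emitter
    observer.stop()
    observer.join()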
							
								
								
									
src/libs/watchdog/observers/fsevents.py (new file, 354 lines)
							| @@ -0,0 +1,354 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers.fsevents | ||||||
|  | :synopsis: FSEvents based emitter implementation. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  | :platforms: macOS | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import os | ||||||
|  | import threading | ||||||
|  | import time | ||||||
|  | import unicodedata | ||||||
|  |  | ||||||
|  | import _libs.watchdog_fsevents as _fsevents  # type: ignore[import] | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  |     generate_sub_created_events, | ||||||
|  |     generate_sub_moved_events, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  | from libs.watchdog.utils.dirsnapshot import DirectorySnapshot | ||||||
|  |  | ||||||
|  | logger = logging.getLogger("fsevents") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FSEventsEmitter(EventEmitter): | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     macOS FSEvents Emitter class. | ||||||
|  |  | ||||||
|  |     :param event_queue: | ||||||
|  |         The event queue to fill with events. | ||||||
|  |     :param watch: | ||||||
|  |         A watch object representing the directory to monitor. | ||||||
|  |     :type watch: | ||||||
|  |         :class:`libs.watchdog.observers.api.ObservedWatch` | ||||||
|  |     :param timeout: | ||||||
|  |         Read events blocking timeout (in seconds). | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     :param suppress_history: | ||||||
|  |         The FSEvents API may emit historic events up to 30 sec before the watch was | ||||||
|  |         started. When ``suppress_history`` is ``True``, those events will be suppressed | ||||||
|  |         by creating a directory snapshot of the watched path before starting the stream, | ||||||
|  |         which is used as a reference to filter out the old events. Warning: This may | ||||||
|  |         result in significant memory usage when the watched path contains a large | ||||||
|  |         number of items. | ||||||
|  |     :type suppress_history: | ||||||
|  |         ``bool`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         event_queue, | ||||||
|  |         watch, | ||||||
|  |         timeout=DEFAULT_EMITTER_TIMEOUT, | ||||||
|  |         suppress_history=False, | ||||||
|  |     ): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |         self._fs_view = set() | ||||||
|  |         self.suppress_history = suppress_history | ||||||
|  |         self._start_time = 0.0 | ||||||
|  |         self._starting_state = None | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |         self._absolute_watch_path = os.path.realpath( | ||||||
|  |             os.path.abspath(os.path.expanduser(self.watch.path)) | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         _fsevents.remove_watch(self.watch) | ||||||
|  |         _fsevents.stop(self) | ||||||
|  |  | ||||||
|  |     def queue_event(self, event): | ||||||
|  |         # FSEvents watches are recursive by default, so if this watch was meant to be non-recursive | ||||||
|  |         # we need to drop every event here whose src_path / dest_path does not match the watched path | ||||||
|  |         if self._watch.is_recursive: | ||||||
|  |             logger.debug("queue_event %s", event) | ||||||
|  |             EventEmitter.queue_event(self, event) | ||||||
|  |         else: | ||||||
|  |             if not self._is_recursive_event(event): | ||||||
|  |                 logger.debug("queue_event %s", event) | ||||||
|  |                 EventEmitter.queue_event(self, event) | ||||||
|  |             else: | ||||||
|  |                 logger.debug("drop event %s", event) | ||||||
|  |  | ||||||
|  |     def _is_recursive_event(self, event): | ||||||
|  |         src_path = ( | ||||||
|  |             event.src_path if event.is_directory else os.path.dirname(event.src_path) | ||||||
|  |         ) | ||||||
|  |         if src_path == self._absolute_watch_path: | ||||||
|  |             return False | ||||||
|  |  | ||||||
|  |         if isinstance(event, (FileMovedEvent, DirMovedEvent)): | ||||||
|  |             # when moving something into the watch path we must always take the dirname, | ||||||
|  |             # otherwise we miss out on `DirMovedEvent`s | ||||||
|  |             dest_path = os.path.dirname(event.dest_path) | ||||||
|  |             if dest_path == self._absolute_watch_path: | ||||||
|  |                 return False | ||||||
|  |  | ||||||
|  |         return True | ||||||
|  |  | ||||||
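    # Illustrative sketch (not part of the committed file): the rule implemented by
    # _is_recursive_event() above keeps an event only when it happened directly at the
    # watched path. For file events the containing directory is compared; for directory
    # events the path itself is compared (the moved-event destination check is omitted here).
    import os

    def _would_be_dropped(src_path, is_directory, watch_path="/watched"):
        path = src_path if is_directory else os.path.dirname(src_path)
        return path != watch_path  # True means the event counts as recursive and is dropped

    assert _would_be_dropped("/watched/file.txt", False) is False      # kept
    assert _would_be_dropped("/watched/sub/file.txt", False) is True   # dropped
    assert _would_be_dropped("/watched", True) is False                # kept (the root itself)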
|  |     def _queue_created_event(self, event, src_path, dirname): | ||||||
|  |         cls = DirCreatedEvent if event.is_directory else FileCreatedEvent | ||||||
|  |         self.queue_event(cls(src_path)) | ||||||
|  |         self.queue_event(DirModifiedEvent(dirname)) | ||||||
|  |  | ||||||
|  |     def _queue_deleted_event(self, event, src_path, dirname): | ||||||
|  |         cls = DirDeletedEvent if event.is_directory else FileDeletedEvent | ||||||
|  |         self.queue_event(cls(src_path)) | ||||||
|  |         self.queue_event(DirModifiedEvent(dirname)) | ||||||
|  |  | ||||||
|  |     def _queue_modified_event(self, event, src_path, dirname): | ||||||
|  |         cls = DirModifiedEvent if event.is_directory else FileModifiedEvent | ||||||
|  |         self.queue_event(cls(src_path)) | ||||||
|  |  | ||||||
|  |     def _queue_renamed_event( | ||||||
|  |         self, src_event, src_path, dst_path, src_dirname, dst_dirname | ||||||
|  |     ): | ||||||
|  |         cls = DirMovedEvent if src_event.is_directory else FileMovedEvent | ||||||
|  |         dst_path = self._encode_path(dst_path) | ||||||
|  |         self.queue_event(cls(src_path, dst_path)) | ||||||
|  |         self.queue_event(DirModifiedEvent(src_dirname)) | ||||||
|  |         self.queue_event(DirModifiedEvent(dst_dirname)) | ||||||
|  |  | ||||||
|  |     def _is_historic_created_event(self, event): | ||||||
|  |         # We only queue a created event if the item was created after we | ||||||
|  |         # started the FSEventsStream. | ||||||
|  |  | ||||||
|  |         in_history = event.inode in self._fs_view | ||||||
|  |  | ||||||
|  |         if self._starting_state: | ||||||
|  |             try: | ||||||
|  |                 old_inode = self._starting_state.inode(event.path)[0] | ||||||
|  |                 before_start = old_inode == event.inode | ||||||
|  |             except KeyError: | ||||||
|  |                 before_start = False | ||||||
|  |         else: | ||||||
|  |             before_start = False | ||||||
|  |  | ||||||
|  |         return in_history or before_start | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def _is_meta_mod(event): | ||||||
|  |         """Returns True if the event indicates a change in metadata.""" | ||||||
|  |         return event.is_inode_meta_mod or event.is_xattr_mod or event.is_owner_change | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout, events): | ||||||
|  |         if logger.getEffectiveLevel() <= logging.DEBUG: | ||||||
|  |             for event in events: | ||||||
|  |                 flags = ", ".join( | ||||||
|  |                     attr for attr in dir(event) if getattr(event, attr) is True | ||||||
|  |                 ) | ||||||
|  |                 logger.debug(f"{event}: {flags}") | ||||||
|  |  | ||||||
|  |         if time.monotonic() - self._start_time > 60: | ||||||
|  |             # Event history is no longer needed, let's free some memory. | ||||||
|  |             self._starting_state = None | ||||||
|  |  | ||||||
|  |         while events: | ||||||
|  |             event = events.pop(0) | ||||||
|  |  | ||||||
|  |             src_path = self._encode_path(event.path) | ||||||
|  |             src_dirname = os.path.dirname(src_path) | ||||||
|  |  | ||||||
|  |             try: | ||||||
|  |                 stat = os.stat(src_path) | ||||||
|  |             except OSError: | ||||||
|  |                 stat = None | ||||||
|  |  | ||||||
|  |             exists = stat and stat.st_ino == event.inode | ||||||
|  |  | ||||||
|  |             # FSevents may coalesce multiple events for the same item + path into a | ||||||
|  |             # single event. However, events are never coalesced for different items at | ||||||
|  |             # the same path or for the same item at different paths. Therefore, the | ||||||
|  |             # event chains "removed -> created" and "created -> renamed -> removed" will | ||||||
|  |             # never emit a single native event and a deleted event *always* means that | ||||||
|  |             # the item no longer existed at the end of the event chain. | ||||||
|  |  | ||||||
|  |             # Some events will have a spurious `is_created` flag set, coalesced from an | ||||||
|  |             # already emitted and processed CreatedEvent. To filter those, we keep track | ||||||
|  |             # of all inodes which we know to be already created. This is safer than | ||||||
|  |             # keeping track of paths since paths are more likely to be reused than | ||||||
|  |             # inodes. | ||||||
|  |  | ||||||
|  |             # Likewise, some events will have a spurious `is_modified`, | ||||||
|  |             # `is_inode_meta_mod` or `is_xattr_mod` flag set. We currently do not | ||||||
|  |             # suppress those but could do so if the item still exists by caching the | ||||||
|  |             # stat result and verifying that it did change. | ||||||
|  |  | ||||||
|  |             if event.is_created and event.is_removed: | ||||||
|  |                 # Events will only be coalesced for the same item / inode. | ||||||
|  |                 # The sequence deleted -> created therefore cannot occur. | ||||||
|  |                 # Any combination with renamed cannot occur either. | ||||||
|  |  | ||||||
|  |                 if not self._is_historic_created_event(event): | ||||||
|  |                     self._queue_created_event(event, src_path, src_dirname) | ||||||
|  |  | ||||||
|  |                 self._fs_view.add(event.inode) | ||||||
|  |  | ||||||
|  |                 if event.is_modified or self._is_meta_mod(event): | ||||||
|  |                     self._queue_modified_event(event, src_path, src_dirname) | ||||||
|  |  | ||||||
|  |                 self._queue_deleted_event(event, src_path, src_dirname) | ||||||
|  |                 self._fs_view.discard(event.inode) | ||||||
|  |  | ||||||
|  |             else: | ||||||
|  |                 if event.is_created and not self._is_historic_created_event(event): | ||||||
|  |                     self._queue_created_event(event, src_path, src_dirname) | ||||||
|  |  | ||||||
|  |                 self._fs_view.add(event.inode) | ||||||
|  |  | ||||||
|  |                 if event.is_modified or self._is_meta_mod(event): | ||||||
|  |                     self._queue_modified_event(event, src_path, src_dirname) | ||||||
|  |  | ||||||
|  |                 if event.is_renamed: | ||||||
|  |                     # Check if we have a corresponding destination event in the watched path. | ||||||
|  |                     dst_event = next( | ||||||
|  |                         iter( | ||||||
|  |                             e for e in events if e.is_renamed and e.inode == event.inode | ||||||
|  |                         ), | ||||||
|  |                         None, | ||||||
|  |                     ) | ||||||
|  |  | ||||||
|  |                     if dst_event: | ||||||
|  |                         # Item was moved within the watched folder. | ||||||
|  |                         logger.debug("Destination event for rename is %s", dst_event) | ||||||
|  |  | ||||||
|  |                         dst_path = self._encode_path(dst_event.path) | ||||||
|  |                         dst_dirname = os.path.dirname(dst_path) | ||||||
|  |  | ||||||
|  |                         self._queue_renamed_event( | ||||||
|  |                             event, src_path, dst_path, src_dirname, dst_dirname | ||||||
|  |                         ) | ||||||
|  |                         self._fs_view.add(event.inode) | ||||||
|  |  | ||||||
|  |                         for sub_event in generate_sub_moved_events(src_path, dst_path): | ||||||
|  |                             self.queue_event(sub_event) | ||||||
|  |  | ||||||
|  |                         # Process any coalesced flags for the dst_event. | ||||||
|  |  | ||||||
|  |                         events.remove(dst_event) | ||||||
|  |  | ||||||
|  |                         if dst_event.is_modified or self._is_meta_mod(dst_event): | ||||||
|  |                             self._queue_modified_event(dst_event, dst_path, dst_dirname) | ||||||
|  |  | ||||||
|  |                         if dst_event.is_removed: | ||||||
|  |                             self._queue_deleted_event(dst_event, dst_path, dst_dirname) | ||||||
|  |                             self._fs_view.discard(dst_event.inode) | ||||||
|  |  | ||||||
|  |                     elif exists: | ||||||
|  |                         # This is the destination event, item was moved into the watched | ||||||
|  |                         # folder. | ||||||
|  |                         self._queue_created_event(event, src_path, src_dirname) | ||||||
|  |                         self._fs_view.add(event.inode) | ||||||
|  |  | ||||||
|  |                         for sub_event in generate_sub_created_events(src_path): | ||||||
|  |                             self.queue_event(sub_event) | ||||||
|  |  | ||||||
|  |                     else: | ||||||
|  |                         # This is the source event, item was moved out of the watched | ||||||
|  |                         # folder. | ||||||
|  |                         self._queue_deleted_event(event, src_path, src_dirname) | ||||||
|  |                         self._fs_view.discard(event.inode) | ||||||
|  |  | ||||||
|  |                         # Skip further coalesced processing. | ||||||
|  |                         continue | ||||||
|  |  | ||||||
|  |                 if event.is_removed: | ||||||
|  |                     # Won't occur together with renamed. | ||||||
|  |                     self._queue_deleted_event(event, src_path, src_dirname) | ||||||
|  |                     self._fs_view.discard(event.inode) | ||||||
|  |  | ||||||
|  |             if event.is_root_changed: | ||||||
|  |                 # This will be set if root or any of its parents is renamed or deleted. | ||||||
|  |                 # TODO: find out new path and generate DirMovedEvent? | ||||||
|  |                 self.queue_event(DirDeletedEvent(self.watch.path)) | ||||||
|  |                 logger.debug("Stopping because root path was changed") | ||||||
|  |                 self.stop() | ||||||
|  |  | ||||||
|  |                 self._fs_view.clear() | ||||||
|  |  | ||||||
|  |     def events_callback(self, paths, inodes, flags, ids): | ||||||
|  |         """Callback passed to FSEventStreamCreate(), it will receive all | ||||||
|  |         FS events and queue them. | ||||||
|  |         """ | ||||||
|  |         cls = _fsevents.NativeEvent | ||||||
|  |         try: | ||||||
|  |             events = [ | ||||||
|  |                 cls(path, inode, event_flags, event_id) | ||||||
|  |                 for path, inode, event_flags, event_id in zip(paths, inodes, flags, ids) | ||||||
|  |             ] | ||||||
|  |             with self._lock: | ||||||
|  |                 self.queue_events(self.timeout, events) | ||||||
|  |         except Exception: | ||||||
|  |             logger.exception("Unhandled exception in fsevents callback") | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         self.pathnames = [self.watch.path] | ||||||
|  |         self._start_time = time.monotonic() | ||||||
|  |         try: | ||||||
|  |             _fsevents.add_watch(self, self.watch, self.events_callback, self.pathnames) | ||||||
|  |             _fsevents.read_events(self) | ||||||
|  |         except Exception: | ||||||
|  |             logger.exception("Unhandled exception in FSEventsEmitter") | ||||||
|  |  | ||||||
|  |     def on_thread_start(self): | ||||||
|  |         if self.suppress_history: | ||||||
|  |             if isinstance(self.watch.path, bytes): | ||||||
|  |                 watch_path = os.fsdecode(self.watch.path) | ||||||
|  |             else: | ||||||
|  |                 watch_path = self.watch.path | ||||||
|  |  | ||||||
|  |             self._starting_state = DirectorySnapshot(watch_path) | ||||||
|  |  | ||||||
|  |     def _encode_path(self, path): | ||||||
|  |         """Encode path only if bytes were passed to this emitter.""" | ||||||
|  |         if isinstance(self.watch.path, bytes): | ||||||
|  |             return os.fsencode(path) | ||||||
|  |         return path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FSEventsObserver(BaseObserver): | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(emitter_class=FSEventsEmitter, timeout=timeout) | ||||||
|  |  | ||||||
|  |     def schedule(self, event_handler, path, recursive=False): | ||||||
|  |         # Fix for issue #26: Trace/BPT error when given a unicode path | ||||||
|  |         # string. https://github.com/gorakhargosh/watchdog/issues#issue/26 | ||||||
|  |         if isinstance(path, str): | ||||||
|  |             path = unicodedata.normalize("NFC", path) | ||||||
|  |         return BaseObserver.schedule(self, event_handler, path, recursive) | ||||||
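A hedged usage sketch for the macOS backend above: schedule a handler on a directory, run the observer for a short while, then shut it down. `LoggingEventHandler` is assumed to exist in `libs.watchdog.events` (as it does in upstream watchdog), and the path is only an example; `schedule()` performs the NFC normalization shown just above.

    import time

    from libs.watchdog.events import LoggingEventHandler  # assumed, as in upstream watchdog

    observer = FSEventsObserver()
    observer.schedule(LoggingEventHandler(), "/Users/example/Documents", recursive=True)
    observer.start()
    try:
        time.sleep(10)  # let some FSEvents arrive
    finally:
        observer.stop()
        observer.join()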
							
								
								
									
262 src/libs/watchdog/observers/fsevents2.py Normal file
							| @@ -0,0 +1,262 @@ | |||||||
|  | # Copyright 2014 Thomas Amland <thomas.amland@gmail.com> | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers.fsevents2 | ||||||
|  | :synopsis: FSEvents based emitter implementation. | ||||||
|  | :platforms: macOS | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import os | ||||||
|  | import queue | ||||||
|  | import unicodedata | ||||||
|  | import warnings | ||||||
|  | from threading import Thread | ||||||
|  | from typing import List, Optional, Type | ||||||
|  |  | ||||||
|  | # pyobjc | ||||||
|  | import AppKit  # type: ignore[import] | ||||||
|  | from FSEvents import (  # type: ignore[import] | ||||||
|  |     CFRunLoopGetCurrent, | ||||||
|  |     CFRunLoopRun, | ||||||
|  |     CFRunLoopStop, | ||||||
|  |     FSEventStreamCreate, | ||||||
|  |     FSEventStreamInvalidate, | ||||||
|  |     FSEventStreamRelease, | ||||||
|  |     FSEventStreamScheduleWithRunLoop, | ||||||
|  |     FSEventStreamStart, | ||||||
|  |     FSEventStreamStop, | ||||||
|  |     kCFAllocatorDefault, | ||||||
|  |     kCFRunLoopDefaultMode, | ||||||
|  |     kFSEventStreamCreateFlagFileEvents, | ||||||
|  |     kFSEventStreamCreateFlagNoDefer, | ||||||
|  |     kFSEventStreamEventFlagItemChangeOwner, | ||||||
|  |     kFSEventStreamEventFlagItemCreated, | ||||||
|  |     kFSEventStreamEventFlagItemFinderInfoMod, | ||||||
|  |     kFSEventStreamEventFlagItemInodeMetaMod, | ||||||
|  |     kFSEventStreamEventFlagItemIsDir, | ||||||
|  |     kFSEventStreamEventFlagItemIsSymlink, | ||||||
|  |     kFSEventStreamEventFlagItemModified, | ||||||
|  |     kFSEventStreamEventFlagItemRemoved, | ||||||
|  |     kFSEventStreamEventFlagItemRenamed, | ||||||
|  |     kFSEventStreamEventFlagItemXattrMod, | ||||||
|  |     kFSEventStreamEventIdSinceNow, | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  |     FileSystemEvent, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  |  | ||||||
|  | message = "libs.watchdog.observers.fsevents2 is deprecated and will be removed in a future release." | ||||||
|  | warnings.warn(message, DeprecationWarning) | ||||||
|  | logger.warning(message) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FSEventsQueue(Thread): | ||||||
|  |     """Low level FSEvents client.""" | ||||||
|  |  | ||||||
|  |     def __init__(self, path): | ||||||
|  |         Thread.__init__(self) | ||||||
|  |         self._queue: queue.Queue[Optional[List[NativeEvent]]] = queue.Queue() | ||||||
|  |         self._run_loop = None | ||||||
|  |  | ||||||
|  |         if isinstance(path, bytes): | ||||||
|  |             path = os.fsdecode(path) | ||||||
|  |         self._path = unicodedata.normalize("NFC", path) | ||||||
|  |  | ||||||
|  |         context = None | ||||||
|  |         latency = 1.0 | ||||||
|  |         self._stream_ref = FSEventStreamCreate( | ||||||
|  |             kCFAllocatorDefault, | ||||||
|  |             self._callback, | ||||||
|  |             context, | ||||||
|  |             [self._path], | ||||||
|  |             kFSEventStreamEventIdSinceNow, | ||||||
|  |             latency, | ||||||
|  |             kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents, | ||||||
|  |         ) | ||||||
|  |         if self._stream_ref is None: | ||||||
|  |             raise OSError("FSEvents. Could not create stream.") | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         pool = AppKit.NSAutoreleasePool.alloc().init() | ||||||
|  |         self._run_loop = CFRunLoopGetCurrent() | ||||||
|  |         FSEventStreamScheduleWithRunLoop( | ||||||
|  |             self._stream_ref, self._run_loop, kCFRunLoopDefaultMode | ||||||
|  |         ) | ||||||
|  |         if not FSEventStreamStart(self._stream_ref): | ||||||
|  |             FSEventStreamInvalidate(self._stream_ref) | ||||||
|  |             FSEventStreamRelease(self._stream_ref) | ||||||
|  |             raise OSError("FSEvents. Could not start stream.") | ||||||
|  |  | ||||||
|  |         CFRunLoopRun() | ||||||
|  |         FSEventStreamStop(self._stream_ref) | ||||||
|  |         FSEventStreamInvalidate(self._stream_ref) | ||||||
|  |         FSEventStreamRelease(self._stream_ref) | ||||||
|  |         del pool | ||||||
|  |         # Make sure waiting thread is notified | ||||||
|  |         self._queue.put(None) | ||||||
|  |  | ||||||
|  |     def stop(self): | ||||||
|  |         if self._run_loop is not None: | ||||||
|  |             CFRunLoopStop(self._run_loop) | ||||||
|  |  | ||||||
|  |     def _callback( | ||||||
|  |         self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs | ||||||
|  |     ): | ||||||
|  |         events = [ | ||||||
|  |             NativeEvent(path, flags, _id) | ||||||
|  |             for path, flags, _id in zip(eventPaths, eventFlags, eventIDs) | ||||||
|  |         ] | ||||||
|  |         logger.debug(f"FSEvents callback. Got {numEvents} events:") | ||||||
|  |         for e in events: | ||||||
|  |             logger.debug(e) | ||||||
|  |         self._queue.put(events) | ||||||
|  |  | ||||||
|  |     def read_events(self): | ||||||
|  |         """ | ||||||
|  |         Returns a list of one or more events, or None if there are no more | ||||||
|  |         events to be read. | ||||||
|  |         """ | ||||||
|  |         if not self.is_alive(): | ||||||
|  |             return None | ||||||
|  |         return self._queue.get() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class NativeEvent: | ||||||
|  |     def __init__(self, path, flags, event_id): | ||||||
|  |         self.path = path | ||||||
|  |         self.flags = flags | ||||||
|  |         self.event_id = event_id | ||||||
|  |         self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated) | ||||||
|  |         self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved) | ||||||
|  |         self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed) | ||||||
|  |         self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified) | ||||||
|  |         self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner) | ||||||
|  |         self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod) | ||||||
|  |         self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod) | ||||||
|  |         self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod) | ||||||
|  |         self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink) | ||||||
|  |         self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def _event_type(self): | ||||||
|  |         if self.is_created: | ||||||
|  |             return "Created" | ||||||
|  |         if self.is_removed: | ||||||
|  |             return "Removed" | ||||||
|  |         if self.is_renamed: | ||||||
|  |             return "Renamed" | ||||||
|  |         if self.is_modified: | ||||||
|  |             return "Modified" | ||||||
|  |         if self.is_inode_meta_mod: | ||||||
|  |             return "InodeMetaMod" | ||||||
|  |         if self.is_xattr_mod: | ||||||
|  |             return "XattrMod" | ||||||
|  |         return "Unknown" | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return ( | ||||||
|  |             f"<{type(self).__name__}: path={self.path!r}, type={self._event_type}," | ||||||
|  |             f" is_dir={self.is_directory}, flags={hex(self.flags)}, id={self.event_id}>" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FSEventsEmitter(EventEmitter): | ||||||
|  |     """ | ||||||
|  |     FSEvents based event emitter. Handles conversion of native events. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |         self._fsevents = FSEventsQueue(watch.path) | ||||||
|  |         self._fsevents.start() | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         self._fsevents.stop() | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout): | ||||||
|  |         events = self._fsevents.read_events() | ||||||
|  |         if events is None: | ||||||
|  |             return | ||||||
|  |         i = 0 | ||||||
|  |         while i < len(events): | ||||||
|  |             event = events[i] | ||||||
|  |  | ||||||
|  |             cls: Type[FileSystemEvent] | ||||||
|  |             # For some reason the create and remove flags are sometimes also | ||||||
|  |             # set for rename and modify type events, so let those take | ||||||
|  |             # precedence. | ||||||
|  |             if event.is_renamed: | ||||||
|  |                 # Internal moves appear to always be consecutive in the same | ||||||
|  |                 # buffer and have IDs differ by exactly one (while others | ||||||
|  |                 # don't) making it possible to pair up the two events coming | ||||||
|  |                 # from a single move operation. (None of this is documented!) | ||||||
|  |                 # Otherwise, guess whether file was moved in or out. | ||||||
|  |                 # TODO: handle id wrapping | ||||||
|  |                 if ( | ||||||
|  |                     i + 1 < len(events) | ||||||
|  |                     and events[i + 1].is_renamed | ||||||
|  |                     and events[i + 1].event_id == event.event_id + 1 | ||||||
|  |                 ): | ||||||
|  |                     cls = DirMovedEvent if event.is_directory else FileMovedEvent | ||||||
|  |                     self.queue_event(cls(event.path, events[i + 1].path)) | ||||||
|  |                     self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) | ||||||
|  |                     self.queue_event( | ||||||
|  |                         DirModifiedEvent(os.path.dirname(events[i + 1].path)) | ||||||
|  |                     ) | ||||||
|  |                     i += 1 | ||||||
|  |                 elif os.path.exists(event.path): | ||||||
|  |                     cls = DirCreatedEvent if event.is_directory else FileCreatedEvent | ||||||
|  |                     self.queue_event(cls(event.path)) | ||||||
|  |                     self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) | ||||||
|  |                 else: | ||||||
|  |                     cls = DirDeletedEvent if event.is_directory else FileDeletedEvent | ||||||
|  |                     self.queue_event(cls(event.path)) | ||||||
|  |                     self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) | ||||||
|  |                 # TODO: generate events for tree | ||||||
|  |  | ||||||
|  |             elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod: | ||||||
|  |                 cls = DirModifiedEvent if event.is_directory else FileModifiedEvent | ||||||
|  |                 self.queue_event(cls(event.path)) | ||||||
|  |  | ||||||
|  |             elif event.is_created: | ||||||
|  |                 cls = DirCreatedEvent if event.is_directory else FileCreatedEvent | ||||||
|  |                 self.queue_event(cls(event.path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) | ||||||
|  |  | ||||||
|  |             elif event.is_removed: | ||||||
|  |                 cls = DirDeletedEvent if event.is_directory else FileDeletedEvent | ||||||
|  |                 self.queue_event(cls(event.path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) | ||||||
|  |             i += 1 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FSEventsObserver2(BaseObserver): | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(emitter_class=FSEventsEmitter, timeout=timeout) | ||||||
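The rename handling in `queue_events()` above relies on an undocumented FSEvents behaviour: the two halves of an internal move arrive as consecutive renamed events whose IDs differ by exactly one. A standalone sketch of that pairing heuristic, not taken from the diff, using plain tuples instead of `NativeEvent`:

    def pair_renames(events):
        """events: list of (path, is_renamed, event_id) tuples, in buffer order."""
        i = 0
        result = []
        while i < len(events):
            path, is_renamed, event_id = events[i]
            if (
                is_renamed
                and i + 1 < len(events)
                and events[i + 1][1]                  # next event is also a rename
                and events[i + 1][2] == event_id + 1  # ...with the very next event ID
            ):
                result.append(("moved", path, events[i + 1][0]))
                i += 2  # consume both halves of the move
            else:
                result.append(("other", path, None))
                i += 1
        return result

    # pair_renames([("/w/a", True, 10), ("/w/b", True, 11)]) == [("moved", "/w/a", "/w/b")]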
							
								
								
									
238 src/libs/watchdog/observers/inotify.py Normal file
							| @@ -0,0 +1,238 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers.inotify | ||||||
|  | :synopsis: ``inotify(7)`` based emitter implementation. | ||||||
|  | :author: Sebastien Martini <seb@dbzteam.org> | ||||||
|  | :author: Luke McCarthy <luke@iogopro.co.uk> | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: Tim Cuthbertson <tim+github@gfxmonk.net> | ||||||
|  | :platforms: Linux 2.6.13+. | ||||||
|  |  | ||||||
|  | .. ADMONITION:: About system requirements | ||||||
|  |  | ||||||
|  |     Recommended minimum kernel version: 2.6.25. | ||||||
|  |  | ||||||
|  |     Quote from the inotify(7) man page: | ||||||
|  |  | ||||||
|  |         "Inotify was merged into the 2.6.13 Linux kernel. The required library | ||||||
|  |         interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW, | ||||||
|  |         IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)" | ||||||
|  |  | ||||||
|  |     Therefore, you must ensure the system provides at least these versions of | ||||||
|  |     the appropriate libraries and the kernel. | ||||||
|  |  | ||||||
|  | .. ADMONITION:: About recursiveness, event order, and event coalescing | ||||||
|  |  | ||||||
|  |     Quote from the inotify(7) man page: | ||||||
|  |  | ||||||
|  |         If successive output inotify events produced on the inotify file | ||||||
|  |         descriptor are identical (same wd, mask, cookie, and name) then they | ||||||
|  |         are coalesced into a single event if the older event has not yet been | ||||||
|  |         read (but see BUGS). | ||||||
|  |  | ||||||
|  |         The events returned by reading from an inotify file descriptor form | ||||||
|  |         an ordered queue. Thus, for example, it is guaranteed that when | ||||||
|  |         renaming from one directory to another, events will be produced in | ||||||
|  |         the correct order on the inotify file descriptor. | ||||||
|  |  | ||||||
|  |         ... | ||||||
|  |  | ||||||
|  |         Inotify monitoring of directories is not recursive: to monitor | ||||||
|  |         subdirectories under a directory, additional watches must be created. | ||||||
|  |  | ||||||
|  |     This emitter implementation therefore automatically adds watches for | ||||||
|  |     sub-directories if running in recursive mode. | ||||||
|  |  | ||||||
|  | Some extremely useful articles and documentation: | ||||||
|  |  | ||||||
|  | .. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en | ||||||
|  | .. _intro to inotify: http://www.linuxjournal.com/article/8478 | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import os | ||||||
|  | import threading | ||||||
|  | from typing import Type | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileClosedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  |     FileOpenedEvent, | ||||||
|  |     FileSystemEvent, | ||||||
|  |     generate_sub_created_events, | ||||||
|  |     generate_sub_moved_events, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  |  | ||||||
|  | from .inotify_buffer import InotifyBuffer | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class InotifyEmitter(EventEmitter): | ||||||
|  |     """ | ||||||
|  |     inotify(7)-based event emitter. | ||||||
|  |  | ||||||
|  |     :param event_queue: | ||||||
|  |         The event queue to fill with events. | ||||||
|  |     :param watch: | ||||||
|  |         A watch object representing the directory to monitor. | ||||||
|  |     :type watch: | ||||||
|  |         :class:`libs.watchdog.observers.api.ObservedWatch` | ||||||
|  |     :param timeout: | ||||||
|  |         Read events blocking timeout (in seconds). | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |         self._inotify = None | ||||||
|  |  | ||||||
|  |     def on_thread_start(self): | ||||||
|  |         path = os.fsencode(self.watch.path) | ||||||
|  |         self._inotify = InotifyBuffer(path, self.watch.is_recursive) | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         if self._inotify: | ||||||
|  |             self._inotify.close() | ||||||
|  |             self._inotify = None | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout, full_events=False): | ||||||
|  |         # If "full_events" is true, then the method will report unmatched move events as separate events | ||||||
|  |         # This behavior is by default only called by a InotifyFullEmitter | ||||||
|  |         if self._inotify is None: | ||||||
|  |             logger.error("InotifyEmitter.queue_events() called when the thread is inactive") | ||||||
|  |             return | ||||||
|  |         with self._lock: | ||||||
|  |             if self._inotify is None: | ||||||
|  |                 logger.error("InotifyEmitter.queue_events() called when the thread is inactive") | ||||||
|  |                 return | ||||||
|  |             event = self._inotify.read_event() | ||||||
|  |             if event is None: | ||||||
|  |                 return | ||||||
|  |  | ||||||
|  |             cls: Type[FileSystemEvent] | ||||||
|  |             if isinstance(event, tuple): | ||||||
|  |                 move_from, move_to = event | ||||||
|  |                 src_path = self._decode_path(move_from.src_path) | ||||||
|  |                 dest_path = self._decode_path(move_to.src_path) | ||||||
|  |                 cls = DirMovedEvent if move_from.is_directory else FileMovedEvent | ||||||
|  |                 self.queue_event(cls(src_path, dest_path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(dest_path))) | ||||||
|  |                 if move_from.is_directory and self.watch.is_recursive: | ||||||
|  |                     for sub_event in generate_sub_moved_events(src_path, dest_path): | ||||||
|  |                         self.queue_event(sub_event) | ||||||
|  |                 return | ||||||
|  |  | ||||||
|  |             src_path = self._decode_path(event.src_path) | ||||||
|  |             if event.is_moved_to: | ||||||
|  |                 if full_events: | ||||||
|  |                     cls = DirMovedEvent if event.is_directory else FileMovedEvent | ||||||
|  |                     self.queue_event(cls(None, src_path)) | ||||||
|  |                 else: | ||||||
|  |                     cls = DirCreatedEvent if event.is_directory else FileCreatedEvent | ||||||
|  |                     self.queue_event(cls(src_path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |                 if event.is_directory and self.watch.is_recursive: | ||||||
|  |                     for sub_event in generate_sub_created_events(src_path): | ||||||
|  |                         self.queue_event(sub_event) | ||||||
|  |             elif event.is_attrib: | ||||||
|  |                 cls = DirModifiedEvent if event.is_directory else FileModifiedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |             elif event.is_modify: | ||||||
|  |                 cls = DirModifiedEvent if event.is_directory else FileModifiedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |             elif event.is_delete or (event.is_moved_from and not full_events): | ||||||
|  |                 cls = DirDeletedEvent if event.is_directory else FileDeletedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |             elif event.is_moved_from and full_events: | ||||||
|  |                 cls = DirMovedEvent if event.is_directory else FileMovedEvent | ||||||
|  |                 self.queue_event(cls(src_path, None)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |             elif event.is_create: | ||||||
|  |                 cls = DirCreatedEvent if event.is_directory else FileCreatedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |             elif event.is_close_write and not event.is_directory: | ||||||
|  |                 cls = FileClosedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) | ||||||
|  |             elif event.is_open and not event.is_directory: | ||||||
|  |                 cls = FileOpenedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |             # elif event.is_close_nowrite and not event.is_directory: | ||||||
|  |             #     cls = FileClosedEvent | ||||||
|  |             #     self.queue_event(cls(src_path)) | ||||||
|  |             elif event.is_delete_self and src_path == self.watch.path: | ||||||
|  |                 cls = DirDeletedEvent if event.is_directory else FileDeletedEvent | ||||||
|  |                 self.queue_event(cls(src_path)) | ||||||
|  |                 self.stop() | ||||||
|  |  | ||||||
|  |     def _decode_path(self, path): | ||||||
|  |         """Decode path only if unicode string was passed to this emitter.""" | ||||||
|  |         if isinstance(self.watch.path, bytes): | ||||||
|  |             return path | ||||||
|  |         return os.fsdecode(path) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class InotifyFullEmitter(InotifyEmitter): | ||||||
|  |     """ | ||||||
|  |     inotify(7)-based event emitter. By default this class produces move events even if they are not matched. | ||||||
|  |     Such move events will have a ``None`` value for the unmatched part. | ||||||
|  |  | ||||||
|  |     :param event_queue: | ||||||
|  |         The event queue to fill with events. | ||||||
|  |     :param watch: | ||||||
|  |         A watch object representing the directory to monitor. | ||||||
|  |     :type watch: | ||||||
|  |         :class:`libs.watchdog.observers.api.ObservedWatch` | ||||||
|  |     :param timeout: | ||||||
|  |         Read events blocking timeout (in seconds). | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout, events=True): | ||||||
|  |         InotifyEmitter.queue_events(self, timeout, full_events=events) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class InotifyObserver(BaseObserver): | ||||||
|  |     """ | ||||||
|  |     Observer thread that schedules watching directories and dispatches | ||||||
|  |     calls to event handlers. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False): | ||||||
|  |         cls = InotifyFullEmitter if generate_full_events else InotifyEmitter | ||||||
|  |         super().__init__(emitter_class=cls, timeout=timeout) | ||||||
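A hedged usage sketch for the Linux backend: with `generate_full_events=True` the observer uses `InotifyFullEmitter`, so a move whose other half lies outside the watch is still reported as a move event with `None` for the unmatched side. The handler class and path below are assumptions made for the example.

    from libs.watchdog.events import FileSystemEventHandler

    class MoveLogger(FileSystemEventHandler):
        def on_moved(self, event):
            # Either src_path or dest_path may be None for an unmatched move.
            print(f"moved: {event.src_path!r} -> {event.dest_path!r}")

    observer = InotifyObserver(generate_full_events=True)
    observer.schedule(MoveLogger(), "/var/data", recursive=True)
    observer.start()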
							
								
								
									
118 src/libs/watchdog/observers/inotify_buffer.py Normal file
							| @@ -0,0 +1,118 @@ | |||||||
|  | # Copyright 2014 Thomas Amland <thomas.amland@gmail.com> | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | from typing import TYPE_CHECKING, List, Tuple, Union | ||||||
|  |  | ||||||
|  | from libs.watchdog.observers.inotify_c import Inotify, InotifyEvent | ||||||
|  | from libs.watchdog.utils import BaseThread | ||||||
|  | from libs.watchdog.utils.delayed_queue import DelayedQueue | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class InotifyBuffer(BaseThread): | ||||||
|  |     """A wrapper for `Inotify` that holds events for `delay` seconds. During | ||||||
|  |     this time, IN_MOVED_FROM and IN_MOVED_TO events are paired. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     delay = 0.5 | ||||||
|  |  | ||||||
|  |     def __init__(self, path, recursive=False): | ||||||
|  |         super().__init__() | ||||||
|  |         self._queue = DelayedQueue[InotifyEvent](self.delay) | ||||||
|  |         self._inotify = Inotify(path, recursive) | ||||||
|  |         self.start() | ||||||
|  |  | ||||||
|  |     def read_event(self): | ||||||
|  |         """Returns a single event or a tuple of from/to events in case of a | ||||||
|  |         paired move event. If this buffer has been closed, immediately return | ||||||
|  |         None. | ||||||
|  |         """ | ||||||
|  |         return self._queue.get() | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         self._inotify.close() | ||||||
|  |         self._queue.close() | ||||||
|  |  | ||||||
|  |     def close(self): | ||||||
|  |         self.stop() | ||||||
|  |         self.join() | ||||||
|  |  | ||||||
|  |     def _group_events(self, event_list): | ||||||
|  |         """Group any matching move events""" | ||||||
|  |         grouped: List[Union[InotifyEvent, Tuple[InotifyEvent, InotifyEvent]]] = [] | ||||||
|  |         for inotify_event in event_list: | ||||||
|  |             logger.debug("in-event %s", inotify_event) | ||||||
|  |  | ||||||
|  |             def matching_from_event(event): | ||||||
|  |                 return ( | ||||||
|  |                     not isinstance(event, tuple) | ||||||
|  |                     and event.is_moved_from | ||||||
|  |                     and event.cookie == inotify_event.cookie | ||||||
|  |                 ) | ||||||
|  |  | ||||||
|  |             if inotify_event.is_moved_to: | ||||||
|  |                 # Check if move_from is already in the buffer | ||||||
|  |                 for index, event in enumerate(grouped): | ||||||
|  |                     if matching_from_event(event): | ||||||
|  |                         if TYPE_CHECKING: | ||||||
|  |                             # this check is hidden from mypy inside matching_from_event() | ||||||
|  |                             assert not isinstance(event, tuple) | ||||||
|  |                         grouped[index] = (event, inotify_event) | ||||||
|  |                         break | ||||||
|  |                 else: | ||||||
|  |                     # Check if move_from is in delayqueue already | ||||||
|  |                     from_event = self._queue.remove(matching_from_event) | ||||||
|  |                     if from_event is not None: | ||||||
|  |                         grouped.append((from_event, inotify_event)) | ||||||
|  |                     else: | ||||||
|  |                         logger.debug("could not find matching move_from event") | ||||||
|  |                         grouped.append(inotify_event) | ||||||
|  |             else: | ||||||
|  |                 grouped.append(inotify_event) | ||||||
|  |         return grouped | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         """Read event from `inotify` and add them to `queue`. When reading a | ||||||
|  |         IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event | ||||||
|  |         and add them back to the queue as a tuple. | ||||||
|  |         """ | ||||||
|  |         deleted_self = False | ||||||
|  |         while self.should_keep_running() and not deleted_self: | ||||||
|  |             inotify_events = self._inotify.read_events() | ||||||
|  |             grouped_events = self._group_events(inotify_events) | ||||||
|  |             for inotify_event in grouped_events: | ||||||
|  |                 if not isinstance(inotify_event, tuple) and inotify_event.is_ignored: | ||||||
|  |                     if inotify_event.src_path == self._inotify.path: | ||||||
|  |                         # Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file | ||||||
|  |                         # was deleted, or filesystem was unmounted), stop watching for events | ||||||
|  |                         deleted_self = True | ||||||
|  |                     continue | ||||||
|  |  | ||||||
|  |                 # Only add delay for unmatched move_from events | ||||||
|  |                 delay = ( | ||||||
|  |                     not isinstance(inotify_event, tuple) and inotify_event.is_moved_from | ||||||
|  |                 ) | ||||||
|  |                 self._queue.put(inotify_event, delay) | ||||||
|  |  | ||||||
|  |                 if ( | ||||||
|  |                     not isinstance(inotify_event, tuple) | ||||||
|  |                     and inotify_event.is_delete_self | ||||||
|  |                     and inotify_event.src_path == self._inotify.path | ||||||
|  |                 ): | ||||||
|  |                     # Deleted the watched directory, stop watching for events | ||||||
|  |                     deleted_self = True | ||||||
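A hedged sketch of how the buffer is consumed (the emitter above does this internally): `read_event()` returns either a single `InotifyEvent` or a `(move_from, move_to)` tuple once the two halves of a rename have been paired by cookie within the 0.5 s delay window, and `None` after the buffer is closed. The path is an assumption.

    buf = InotifyBuffer(b"/tmp/watched", recursive=True)
    try:
        while True:
            event = buf.read_event()
            if event is None:                 # buffer was closed
                break
            if isinstance(event, tuple):      # paired IN_MOVED_FROM / IN_MOVED_TO
                move_from, move_to = event
                print("move:", move_from.src_path, "->", move_to.src_path)
            else:
                print("event:", event)
    finally:
        buf.close()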
							
								
								
									
602 src/libs/watchdog/observers/inotify_c.py Normal file
							| @@ -0,0 +1,602 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import ctypes | ||||||
|  | import ctypes.util | ||||||
|  | import errno | ||||||
|  | import os | ||||||
|  | import struct | ||||||
|  | import threading | ||||||
|  | from ctypes import c_char_p, c_int, c_uint32 | ||||||
|  | from functools import reduce | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils import UnsupportedLibc | ||||||
|  |  | ||||||
|  | libc = ctypes.CDLL(None) | ||||||
|  |  | ||||||
|  | if ( | ||||||
|  |     not hasattr(libc, "inotify_init") | ||||||
|  |     or not hasattr(libc, "inotify_add_watch") | ||||||
|  |     or not hasattr(libc, "inotify_rm_watch") | ||||||
|  | ): | ||||||
|  |     raise UnsupportedLibc(f"Unsupported libc version found: {libc._name}") | ||||||
|  |  | ||||||
|  | inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)( | ||||||
|  |     ("inotify_add_watch", libc) | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)( | ||||||
|  |     ("inotify_rm_watch", libc) | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(("inotify_init", libc)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class InotifyConstants: | ||||||
|  |     # User-space events | ||||||
|  |     IN_ACCESS = 0x00000001  # File was accessed. | ||||||
|  |     IN_MODIFY = 0x00000002  # File was modified. | ||||||
|  |     IN_ATTRIB = 0x00000004  # Meta-data changed. | ||||||
|  |     IN_CLOSE_WRITE = 0x00000008  # Writable file was closed. | ||||||
|  |     IN_CLOSE_NOWRITE = 0x00000010  # Unwritable file closed. | ||||||
|  |     IN_OPEN = 0x00000020  # File was opened. | ||||||
|  |     IN_MOVED_FROM = 0x00000040  # File was moved from X. | ||||||
|  |     IN_MOVED_TO = 0x00000080  # File was moved to Y. | ||||||
|  |     IN_CREATE = 0x00000100  # Subfile was created. | ||||||
|  |     IN_DELETE = 0x00000200  # Subfile was deleted. | ||||||
|  |     IN_DELETE_SELF = 0x00000400  # Self was deleted. | ||||||
|  |     IN_MOVE_SELF = 0x00000800  # Self was moved. | ||||||
|  |  | ||||||
|  |     # Helper user-space events. | ||||||
|  |     IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # Close. | ||||||
|  |     IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO  # Moves. | ||||||
|  |  | ||||||
|  |     # Events sent by the kernel to a watch. | ||||||
|  |     IN_UNMOUNT = 0x00002000  # Backing file system was unmounted. | ||||||
|  |     IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed. | ||||||
|  |     IN_IGNORED = 0x00008000  # File was ignored. | ||||||
|  |  | ||||||
|  |     # Special flags. | ||||||
|  |     IN_ONLYDIR = 0x01000000  # Only watch the path if it's a directory. | ||||||
|  |     IN_DONT_FOLLOW = 0x02000000  # Do not follow a symbolic link. | ||||||
|  |     IN_EXCL_UNLINK = 0x04000000  # Exclude events on unlinked objects | ||||||
|  |     IN_MASK_ADD = 0x20000000  # Add to the mask of an existing watch. | ||||||
|  |     IN_ISDIR = 0x40000000  # Event occurred against directory. | ||||||
|  |     IN_ONESHOT = 0x80000000  # Only send event once. | ||||||
|  |  | ||||||
|  |     # All user-space events. | ||||||
|  |     IN_ALL_EVENTS = reduce( | ||||||
|  |         lambda x, y: x | y, | ||||||
|  |         [ | ||||||
|  |             IN_ACCESS, | ||||||
|  |             IN_MODIFY, | ||||||
|  |             IN_ATTRIB, | ||||||
|  |             IN_CLOSE_WRITE, | ||||||
|  |             IN_CLOSE_NOWRITE, | ||||||
|  |             IN_OPEN, | ||||||
|  |             IN_MOVED_FROM, | ||||||
|  |             IN_MOVED_TO, | ||||||
|  |             IN_DELETE, | ||||||
|  |             IN_CREATE, | ||||||
|  |             IN_DELETE_SELF, | ||||||
|  |             IN_MOVE_SELF, | ||||||
|  |         ], | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     # Flags for ``inotify_init1`` | ||||||
|  |     IN_CLOEXEC = 0x02000000 | ||||||
|  |     IN_NONBLOCK = 0x00004000 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Watchdog's API cares only about these events. | ||||||
|  | WATCHDOG_ALL_EVENTS = reduce( | ||||||
|  |     lambda x, y: x | y, | ||||||
|  |     [ | ||||||
|  |         InotifyConstants.IN_MODIFY, | ||||||
|  |         InotifyConstants.IN_ATTRIB, | ||||||
|  |         InotifyConstants.IN_MOVED_FROM, | ||||||
|  |         InotifyConstants.IN_MOVED_TO, | ||||||
|  |         InotifyConstants.IN_CREATE, | ||||||
|  |         InotifyConstants.IN_DELETE, | ||||||
|  |         InotifyConstants.IN_DELETE_SELF, | ||||||
|  |         InotifyConstants.IN_DONT_FOLLOW, | ||||||
|  |         InotifyConstants.IN_CLOSE_WRITE, | ||||||
|  |         InotifyConstants.IN_OPEN, | ||||||
|  |     ], | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class inotify_event_struct(ctypes.Structure): | ||||||
|  |     """ | ||||||
|  |     Structure representation of the inotify_event structure | ||||||
|  |     (used in buffer size calculations):: | ||||||
|  |  | ||||||
|  |         struct inotify_event { | ||||||
|  |             __s32 wd;            /* watch descriptor */ | ||||||
|  |             __u32 mask;          /* watch mask */ | ||||||
|  |             __u32 cookie;        /* cookie to synchronize two events */ | ||||||
|  |             __u32 len;           /* length (including nulls) of name */ | ||||||
|  |             char  name[0];       /* stub for possible name */ | ||||||
|  |         }; | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     _fields_ = [ | ||||||
|  |         ("wd", c_int), | ||||||
|  |         ("mask", c_uint32), | ||||||
|  |         ("cookie", c_uint32), | ||||||
|  |         ("len", c_uint32), | ||||||
|  |         ("name", c_char_p), | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | EVENT_SIZE = ctypes.sizeof(inotify_event_struct) | ||||||
|  | DEFAULT_NUM_EVENTS = 2048 | ||||||
|  | DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16) | ||||||
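|  | # Note: each record read from the inotify fd is a fixed-size header plus a | ||||||
|  | # variable-length, NUL-padded name, so the "+ 16" above is only a rough | ||||||
|  | # per-event allowance for short names when sizing the read buffer. | ||||||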
|  |  | ||||||
|  |  | ||||||
|  | class Inotify: | ||||||
|  |     """ | ||||||
|  |     Linux inotify(7) API wrapper class. | ||||||
|  |  | ||||||
|  |     :param path: | ||||||
|  |         The directory path for which we want an inotify object. | ||||||
|  |     :type path: | ||||||
|  |         :class:`bytes` | ||||||
|  |     :param recursive: | ||||||
|  |         ``True`` if subdirectories should be monitored; ``False`` otherwise. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS): | ||||||
|  |         # The file descriptor associated with the inotify instance. | ||||||
|  |         inotify_fd = inotify_init() | ||||||
|  |         if inotify_fd == -1: | ||||||
|  |             Inotify._raise_error() | ||||||
|  |         self._inotify_fd = inotify_fd | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |  | ||||||
|  |         # Stores the watch descriptor for a given path. | ||||||
|  |         self._wd_for_path = {} | ||||||
|  |         self._path_for_wd = {} | ||||||
|  |  | ||||||
|  |         self._path = path | ||||||
|  |         self._event_mask = event_mask | ||||||
|  |         self._is_recursive = recursive | ||||||
|  |         if os.path.isdir(path): | ||||||
|  |             self._add_dir_watch(path, recursive, event_mask) | ||||||
|  |         else: | ||||||
|  |             self._add_watch(path, event_mask) | ||||||
|  |         self._moved_from_events = {} | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def event_mask(self): | ||||||
|  |         """The event mask for this inotify instance.""" | ||||||
|  |         return self._event_mask | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def path(self): | ||||||
|  |         """The path associated with the inotify instance.""" | ||||||
|  |         return self._path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_recursive(self): | ||||||
|  |         """Whether we are watching directories recursively.""" | ||||||
|  |         return self._is_recursive | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def fd(self): | ||||||
|  |         """The file descriptor associated with the inotify instance.""" | ||||||
|  |         return self._inotify_fd | ||||||
|  |  | ||||||
|  |     def clear_move_records(self): | ||||||
|  |         """Clear cached records of MOVED_FROM events""" | ||||||
|  |         self._moved_from_events = {} | ||||||
|  |  | ||||||
|  |     def source_for_move(self, destination_event): | ||||||
|  |         """ | ||||||
|  |         The source path corresponding to the given MOVED_TO event. | ||||||
|  |  | ||||||
|  |         If the source path is outside the monitored directories, None | ||||||
|  |         is returned instead. | ||||||
|  |         """ | ||||||
|  |         if destination_event.cookie in self._moved_from_events: | ||||||
|  |             return self._moved_from_events[destination_event.cookie].src_path | ||||||
|  |         else: | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |     def remember_move_from_event(self, event): | ||||||
|  |         """ | ||||||
|  |         Save this event as the source event for future MOVED_TO events to | ||||||
|  |         reference. | ||||||
|  |         """ | ||||||
|  |         self._moved_from_events[event.cookie] = event | ||||||
|  |  | ||||||
|  |     def add_watch(self, path): | ||||||
|  |         """ | ||||||
|  |         Adds a watch for the given path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path to begin monitoring. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             self._add_watch(path, self._event_mask) | ||||||
|  |  | ||||||
|  |     def remove_watch(self, path): | ||||||
|  |         """ | ||||||
|  |         Removes a watch for the given path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path string for which the watch will be removed. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             wd = self._wd_for_path.pop(path) | ||||||
|  |             del self._path_for_wd[wd] | ||||||
|  |             if inotify_rm_watch(self._inotify_fd, wd) == -1: | ||||||
|  |                 Inotify._raise_error() | ||||||
|  |  | ||||||
|  |     def close(self): | ||||||
|  |         """ | ||||||
|  |         Closes the inotify instance and removes all associated watches. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             if self._path in self._wd_for_path: | ||||||
|  |                 wd = self._wd_for_path[self._path] | ||||||
|  |                 inotify_rm_watch(self._inotify_fd, wd) | ||||||
|  |  | ||||||
|  |             try: | ||||||
|  |                 os.close(self._inotify_fd) | ||||||
|  |             except OSError: | ||||||
|  |                 # descriptor may be invalid because file was deleted | ||||||
|  |                 pass | ||||||
|  |  | ||||||
|  |     def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE): | ||||||
|  |         """ | ||||||
|  |         Reads events from inotify and yields them. | ||||||
|  |         """ | ||||||
|  |         # HACK: We need to traverse the directory path | ||||||
|  |         # recursively and simulate events for newly | ||||||
|  |         # created subdirectories/files. This will handle | ||||||
|  |         # mkdir -p foobar/blah/bar; touch foobar/afile | ||||||
|  |  | ||||||
|  |         def _recursive_simulate(src_path): | ||||||
|  |             events = [] | ||||||
|  |             for root, dirnames, filenames in os.walk(src_path): | ||||||
|  |                 for dirname in dirnames: | ||||||
|  |                     try: | ||||||
|  |                         full_path = os.path.join(root, dirname) | ||||||
|  |                         wd_dir = self._add_watch(full_path, self._event_mask) | ||||||
|  |                         e = InotifyEvent( | ||||||
|  |                             wd_dir, | ||||||
|  |                             InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, | ||||||
|  |                             0, | ||||||
|  |                             dirname, | ||||||
|  |                             full_path, | ||||||
|  |                         ) | ||||||
|  |                         events.append(e) | ||||||
|  |                     except OSError: | ||||||
|  |                         pass | ||||||
|  |                 for filename in filenames: | ||||||
|  |                     full_path = os.path.join(root, filename) | ||||||
|  |                     wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)] | ||||||
|  |                     e = InotifyEvent( | ||||||
|  |                         wd_parent_dir, | ||||||
|  |                         InotifyConstants.IN_CREATE, | ||||||
|  |                         0, | ||||||
|  |                         filename, | ||||||
|  |                         full_path, | ||||||
|  |                     ) | ||||||
|  |                     events.append(e) | ||||||
|  |             return events | ||||||
|  |  | ||||||
|  |         event_buffer = None | ||||||
|  |         while True: | ||||||
|  |             try: | ||||||
|  |                 event_buffer = os.read(self._inotify_fd, event_buffer_size) | ||||||
|  |             except OSError as e: | ||||||
|  |                 if e.errno == errno.EINTR: | ||||||
|  |                     continue | ||||||
|  |                 elif e.errno == errno.EBADF: | ||||||
|  |                     return [] | ||||||
|  |                 else: | ||||||
|  |                     raise | ||||||
|  |             break | ||||||
|  |  | ||||||
|  |         with self._lock: | ||||||
|  |             event_list = [] | ||||||
|  |             for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer): | ||||||
|  |                 if wd == -1: | ||||||
|  |                     continue | ||||||
|  |                 wd_path = self._path_for_wd[wd] | ||||||
|  |                 src_path = ( | ||||||
|  |                     os.path.join(wd_path, name) if name else wd_path | ||||||
|  |                 )  # avoid trailing slash | ||||||
|  |                 inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) | ||||||
|  |  | ||||||
|  |                 if inotify_event.is_moved_from: | ||||||
|  |                     self.remember_move_from_event(inotify_event) | ||||||
|  |                 elif inotify_event.is_moved_to: | ||||||
|  |                     move_src_path = self.source_for_move(inotify_event) | ||||||
|  |                     if move_src_path in self._wd_for_path: | ||||||
|  |                         moved_wd = self._wd_for_path[move_src_path] | ||||||
|  |                         del self._wd_for_path[move_src_path] | ||||||
|  |                         self._wd_for_path[inotify_event.src_path] = moved_wd | ||||||
|  |                         self._path_for_wd[moved_wd] = inotify_event.src_path | ||||||
|  |                         if self.is_recursive: | ||||||
|  |                             for _path, _wd in self._wd_for_path.copy().items(): | ||||||
|  |                                 if _path.startswith( | ||||||
|  |                                     move_src_path + os.path.sep.encode() | ||||||
|  |                                 ): | ||||||
|  |                                     moved_wd = self._wd_for_path.pop(_path) | ||||||
|  |                                     _move_to_path = _path.replace( | ||||||
|  |                                         move_src_path, inotify_event.src_path | ||||||
|  |                                     ) | ||||||
|  |                                     self._wd_for_path[_move_to_path] = moved_wd | ||||||
|  |                                     self._path_for_wd[moved_wd] = _move_to_path | ||||||
|  |                     src_path = os.path.join(wd_path, name) | ||||||
|  |                     inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) | ||||||
|  |  | ||||||
|  |                 if inotify_event.is_ignored: | ||||||
|  |                     # Clean up book-keeping for deleted watches. | ||||||
|  |                     path = self._path_for_wd.pop(wd) | ||||||
|  |                     if self._wd_for_path[path] == wd: | ||||||
|  |                         del self._wd_for_path[path] | ||||||
|  |  | ||||||
|  |                 event_list.append(inotify_event) | ||||||
|  |  | ||||||
|  |                 if ( | ||||||
|  |                     self.is_recursive | ||||||
|  |                     and inotify_event.is_directory | ||||||
|  |                     and inotify_event.is_create | ||||||
|  |                 ): | ||||||
|  |                     # TODO: When a directory from another part of the | ||||||
|  |                     # filesystem is moved into a watched directory, this | ||||||
|  |                     # will not generate events for the directory tree. | ||||||
|  |                     # We need to coalesce IN_MOVED_TO events: those that | ||||||
|  |                     # don't pair up with IN_MOVED_FROM events should be | ||||||
|  |                     # marked IN_CREATE relative to this directory instead. | ||||||
|  |                     try: | ||||||
|  |                         self._add_watch(src_path, self._event_mask) | ||||||
|  |                     except OSError: | ||||||
|  |                         continue | ||||||
|  |  | ||||||
|  |                     event_list.extend(_recursive_simulate(src_path)) | ||||||
|  |  | ||||||
|  |         return event_list | ||||||
|  |  | ||||||
|  |     # Non-synchronized methods. | ||||||
|  |     def _add_dir_watch(self, path, recursive, mask): | ||||||
|  |         """ | ||||||
|  |         Adds a watch (optionally recursively) for the given directory path | ||||||
|  |         to monitor events specified by the mask. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path to monitor | ||||||
|  |         :param recursive: | ||||||
|  |             ``True`` to monitor recursively. | ||||||
|  |         :param mask: | ||||||
|  |             Event bit mask. | ||||||
|  |         """ | ||||||
|  |         if not os.path.isdir(path): | ||||||
|  |             raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path) | ||||||
|  |         self._add_watch(path, mask) | ||||||
|  |         if recursive: | ||||||
|  |             for root, dirnames, _ in os.walk(path): | ||||||
|  |                 for dirname in dirnames: | ||||||
|  |                     full_path = os.path.join(root, dirname) | ||||||
|  |                     if os.path.islink(full_path): | ||||||
|  |                         continue | ||||||
|  |                     self._add_watch(full_path, mask) | ||||||
|  |  | ||||||
|  |     def _add_watch(self, path, mask): | ||||||
|  |         """ | ||||||
|  |         Adds a watch for the given path to monitor events specified by the | ||||||
|  |         mask. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path to monitor | ||||||
|  |         :param mask: | ||||||
|  |             Event bit mask. | ||||||
|  |         """ | ||||||
|  |         wd = inotify_add_watch(self._inotify_fd, path, mask) | ||||||
|  |         if wd == -1: | ||||||
|  |             Inotify._raise_error() | ||||||
|  |         self._wd_for_path[path] = wd | ||||||
|  |         self._path_for_wd[wd] = path | ||||||
|  |         return wd | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def _raise_error(): | ||||||
|  |         """ | ||||||
|  |         Raises errors for inotify failures. | ||||||
|  |         """ | ||||||
|  |         err = ctypes.get_errno() | ||||||
|  |         if err == errno.ENOSPC: | ||||||
|  |             raise OSError(errno.ENOSPC, "inotify watch limit reached") | ||||||
|  |         elif err == errno.EMFILE: | ||||||
|  |             raise OSError(errno.EMFILE, "inotify instance limit reached") | ||||||
|  |         elif err != errno.EACCES: | ||||||
|  |             raise OSError(err, os.strerror(err)) | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def _parse_event_buffer(event_buffer): | ||||||
|  |         """ | ||||||
|  |         Parses an event buffer of ``inotify_event`` structs returned by | ||||||
|  |         inotify:: | ||||||
|  |  | ||||||
|  |             struct inotify_event { | ||||||
|  |                 __s32 wd;            /* watch descriptor */ | ||||||
|  |                 __u32 mask;          /* watch mask */ | ||||||
|  |                 __u32 cookie;        /* cookie to synchronize two events */ | ||||||
|  |                 __u32 len;           /* length (including nulls) of name */ | ||||||
|  |                 char  name[0];       /* stub for possible name */ | ||||||
|  |             }; | ||||||
|  |  | ||||||
|  |         The ``cookie`` member of this struct is used to pair two related | ||||||
|  |         events, for example, it pairs an IN_MOVED_FROM event with an | ||||||
|  |         IN_MOVED_TO event. | ||||||
|  |         """ | ||||||
|  |         i = 0 | ||||||
|  |         while i + 16 <= len(event_buffer): | ||||||
|  |             wd, mask, cookie, length = struct.unpack_from("iIII", event_buffer, i) | ||||||
|  |             name = event_buffer[i + 16 : i + 16 + length].rstrip(b"\0") | ||||||
|  |             i += 16 + length | ||||||
|  |             yield wd, mask, cookie, name | ||||||
|  |  | ||||||
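|  | # Illustrative sketch, not part of the upstream watchdog sources: a minimal | ||||||
|  | # way to drive the wrapper above, assuming the hypothetical directory below | ||||||
|  | # exists. read_events() blocks until inotify reports activity. | ||||||
|  | def _inotify_usage_example(path=b"/tmp/watched"): | ||||||
|  |     watcher = Inotify(path, recursive=True) | ||||||
|  |     try: | ||||||
|  |         # Each call returns a list of InotifyEvent objects parsed from one read. | ||||||
|  |         for event in watcher.read_events(): | ||||||
|  |             print(event) | ||||||
|  |     finally: | ||||||
|  |         watcher.close() | ||||||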
|  |  | ||||||
|  | class InotifyEvent: | ||||||
|  |     """ | ||||||
|  |     Inotify event struct wrapper. | ||||||
|  |  | ||||||
|  |     :param wd: | ||||||
|  |         Watch descriptor | ||||||
|  |     :param mask: | ||||||
|  |         Event mask | ||||||
|  |     :param cookie: | ||||||
|  |         Event cookie | ||||||
|  |     :param name: | ||||||
|  |         Base name of the event source path. | ||||||
|  |     :param src_path: | ||||||
|  |         Full event source path. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, wd, mask, cookie, name, src_path): | ||||||
|  |         self._wd = wd | ||||||
|  |         self._mask = mask | ||||||
|  |         self._cookie = cookie | ||||||
|  |         self._name = name | ||||||
|  |         self._src_path = src_path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def src_path(self): | ||||||
|  |         return self._src_path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def wd(self): | ||||||
|  |         return self._wd | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def mask(self): | ||||||
|  |         return self._mask | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def cookie(self): | ||||||
|  |         return self._cookie | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def name(self): | ||||||
|  |         return self._name | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_modify(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_MODIFY > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_close_write(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_close_nowrite(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_open(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_OPEN > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_access(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_ACCESS > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_delete(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_DELETE > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_delete_self(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_DELETE_SELF > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_create(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_CREATE > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_moved_from(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_MOVED_FROM > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_moved_to(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_MOVED_TO > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_move(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_MOVE > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_move_self(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_MOVE_SELF > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_attrib(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_ATTRIB > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_ignored(self): | ||||||
|  |         return self._mask & InotifyConstants.IN_IGNORED > 0 | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_directory(self): | ||||||
|  |         # It looks like the kernel does not provide this information for | ||||||
|  |         # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir. | ||||||
|  |         # See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897 | ||||||
|  |         return ( | ||||||
|  |             self.is_delete_self | ||||||
|  |             or self.is_move_self | ||||||
|  |             or self._mask & InotifyConstants.IN_ISDIR > 0 | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def key(self): | ||||||
|  |         return self._src_path, self._wd, self._mask, self._cookie, self._name | ||||||
|  |  | ||||||
|  |     def __eq__(self, inotify_event): | ||||||
|  |         return self.key == inotify_event.key | ||||||
|  |  | ||||||
|  |     def __ne__(self, inotify_event): | ||||||
|  |         return self.key != inotify_event.key | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return hash(self.key) | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def _get_mask_string(mask): | ||||||
|  |         masks = [] | ||||||
|  |         for c in dir(InotifyConstants): | ||||||
|  |             if c.startswith("IN_") and c not in [ | ||||||
|  |                 "IN_ALL_EVENTS", | ||||||
|  |                 "IN_CLOSE", | ||||||
|  |                 "IN_MOVE", | ||||||
|  |             ]: | ||||||
|  |                 c_val = getattr(InotifyConstants, c) | ||||||
|  |                 if mask & c_val: | ||||||
|  |                     masks.append(c) | ||||||
|  |         return "|".join(masks) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return ( | ||||||
|  |             f"<{type(self).__name__}: src_path={self.src_path!r}, wd={self.wd}," | ||||||
|  |             f" mask={self._get_mask_string(self.mask)}, cookie={self.cookie}," | ||||||
|  |             f" name={os.fsdecode(self.name)!r}>" | ||||||
|  |         ) | ||||||
							
								
								
									
699 src/libs/watchdog/observers/kqueue.py (Normal file)
							| @@ -0,0 +1,699 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | # The `select` module varies between platforms. | ||||||
|  | # mypy may complain about missing module attributes | ||||||
|  | # depending on which platform it's running on. | ||||||
|  | # The comment below disables mypy's attribute check. | ||||||
|  | # | ||||||
|  | # mypy: disable-error-code=attr-defined | ||||||
|  | # | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers.kqueue | ||||||
|  | :synopsis: ``kqueue(2)`` based emitter implementation. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  | :platforms: macOS and BSD with kqueue(2). | ||||||
|  |  | ||||||
|  | .. WARNING:: kqueue is a very heavyweight way to monitor file systems. | ||||||
|  |              Each kqueue-detected directory modification triggers | ||||||
|  |              a full directory scan. Traversing the entire directory tree | ||||||
|  |              and opening file descriptors for all files will create | ||||||
|  |              performance problems. We need to find a way to re-scan | ||||||
|  |              only those directories which report changes and do a diff | ||||||
|  |              between two sub-DirectorySnapshots perhaps. | ||||||
|  |  | ||||||
|  | .. ADMONITION:: About OS X performance guidelines | ||||||
|  |  | ||||||
|  |     Quote from the `macOS File System Performance Guidelines`_: | ||||||
|  |  | ||||||
|  |         "When you only want to track changes on a file or directory, be sure to | ||||||
|  |         open it using the ``O_EVTONLY`` flag. This flag prevents the file or | ||||||
|  |         directory from being marked as open or in use. This is important | ||||||
|  |         if you are tracking files on a removable volume and the user tries to | ||||||
|  |         unmount the volume. With this flag in place, the system knows it can | ||||||
|  |         dismiss the volume. If you had opened the files or directories without | ||||||
|  |         this flag, the volume would be marked as busy and would not be | ||||||
|  |         unmounted." | ||||||
|  |  | ||||||
|  |     ``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files. | ||||||
|  |     More information here: http://www.mlsite.net/blog/?p=2312 | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ------- | ||||||
|  | .. autoclass:: KqueueEmitter | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | Collections and Utility Classes | ||||||
|  | ------------------------------- | ||||||
|  | .. autoclass:: KeventDescriptor | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: KeventDescriptorSet | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. _macOS File System Performance Guidelines: | ||||||
|  |     http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import errno | ||||||
|  | import os | ||||||
|  | import os.path | ||||||
|  | import select | ||||||
|  | import threading | ||||||
|  | from stat import S_ISDIR | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     EVENT_TYPE_CREATED, | ||||||
|  |     EVENT_TYPE_DELETED, | ||||||
|  |     EVENT_TYPE_MOVED, | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  |     generate_sub_moved_events, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  | from libs.watchdog.utils import platform | ||||||
|  | from libs.watchdog.utils.dirsnapshot import DirectorySnapshot | ||||||
|  |  | ||||||
|  | # Maximum number of events to process. | ||||||
|  | MAX_EVENTS = 4096 | ||||||
|  |  | ||||||
|  | # O_EVTONLY value from the header files for OS X only. | ||||||
|  | O_EVTONLY = 0x8000 | ||||||
|  |  | ||||||
|  | # Pre-calculated values for the kevent filter, flags, and fflags attributes. | ||||||
|  | if platform.is_darwin(): | ||||||
|  |     WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY | ||||||
|  | else: | ||||||
|  |     WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK | ||||||
|  | WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE | ||||||
|  | WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR | ||||||
|  | WATCHDOG_KQ_FFLAGS = ( | ||||||
|  |     select.KQ_NOTE_DELETE | ||||||
|  |     | select.KQ_NOTE_WRITE | ||||||
|  |     | select.KQ_NOTE_EXTEND | ||||||
|  |     | select.KQ_NOTE_ATTRIB | ||||||
|  |     | select.KQ_NOTE_LINK | ||||||
|  |     | select.KQ_NOTE_RENAME | ||||||
|  |     | select.KQ_NOTE_REVOKE | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def absolute_path(path): | ||||||
|  |     return os.path.abspath(os.path.normpath(path)) | ||||||
|  |  | ||||||
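|  | # Illustrative sketch, not part of the upstream watchdog sources: how a | ||||||
|  | # single path is registered with kqueue(2) using the flags defined above. | ||||||
|  | # The path below is hypothetical and must already exist. | ||||||
|  | def _kqueue_usage_example(path="/tmp/watched", timeout=1.0): | ||||||
|  |     fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)  # O_EVTONLY on macOS | ||||||
|  |     kq = select.kqueue() | ||||||
|  |     kev = select.kevent( | ||||||
|  |         fd, | ||||||
|  |         filter=WATCHDOG_KQ_FILTER, | ||||||
|  |         flags=WATCHDOG_KQ_EV_FLAGS, | ||||||
|  |         fflags=WATCHDOG_KQ_FFLAGS, | ||||||
|  |     ) | ||||||
|  |     try: | ||||||
|  |         # Returns at most MAX_EVENTS kevents, or an empty list on timeout. | ||||||
|  |         return kq.control([kev], MAX_EVENTS, timeout) | ||||||
|  |     finally: | ||||||
|  |         kq.close() | ||||||
|  |         os.close(fd) | ||||||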
|  |  | ||||||
|  | # Flag tests. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_deleted(kev): | ||||||
|  |     """Determines whether the given kevent represents deletion.""" | ||||||
|  |     return kev.fflags & select.KQ_NOTE_DELETE | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_modified(kev): | ||||||
|  |     """Determines whether the given kevent represents modification.""" | ||||||
|  |     fflags = kev.fflags | ||||||
|  |     return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_attrib_modified(kev): | ||||||
|  |     """Determines whether the given kevent represents attribute modification.""" | ||||||
|  |     return kev.fflags & select.KQ_NOTE_ATTRIB | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_renamed(kev): | ||||||
|  |     """Determines whether the given kevent represents movement.""" | ||||||
|  |     return kev.fflags & select.KQ_NOTE_RENAME | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class KeventDescriptorSet: | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     Thread-safe kevent descriptor collection. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self): | ||||||
|  |         # Set of KeventDescriptor | ||||||
|  |         self._descriptors = set() | ||||||
|  |  | ||||||
|  |         # Descriptor for a given path. | ||||||
|  |         self._descriptor_for_path = dict() | ||||||
|  |  | ||||||
|  |         # Descriptor for a given fd. | ||||||
|  |         self._descriptor_for_fd = dict() | ||||||
|  |  | ||||||
|  |         # List of kevent objects. | ||||||
|  |         self._kevents = list() | ||||||
|  |  | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def kevents(self): | ||||||
|  |         """ | ||||||
|  |         List of kevents monitored. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             return self._kevents | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def paths(self): | ||||||
|  |         """ | ||||||
|  |         List of paths for which kevents have been created. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             return list(self._descriptor_for_path.keys()) | ||||||
|  |  | ||||||
|  |     def get_for_fd(self, fd): | ||||||
|  |         """ | ||||||
|  |         Given a file descriptor, returns the kevent descriptor object | ||||||
|  |         for it. | ||||||
|  |  | ||||||
|  |         :param fd: | ||||||
|  |             OS file descriptor. | ||||||
|  |         :type fd: | ||||||
|  |             ``int`` | ||||||
|  |         :returns: | ||||||
|  |             A :class:`KeventDescriptor` object. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             return self._descriptor_for_fd[fd] | ||||||
|  |  | ||||||
|  |     def get(self, path): | ||||||
|  |         """ | ||||||
|  |         Obtains a :class:`KeventDescriptor` object for the specified path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path for which the descriptor will be obtained. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             path = absolute_path(path) | ||||||
|  |             return self._get(path) | ||||||
|  |  | ||||||
|  |     def __contains__(self, path): | ||||||
|  |         """ | ||||||
|  |         Determines whether a :class:`KeventDescriptor` has been registered | ||||||
|  |         for the specified path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path for which the descriptor will be obtained. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             path = absolute_path(path) | ||||||
|  |             return self._has_path(path) | ||||||
|  |  | ||||||
|  |     def add(self, path, is_directory): | ||||||
|  |         """ | ||||||
|  |         Adds a :class:`KeventDescriptor` to the collection for the given | ||||||
|  |         path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             The path for which a :class:`KeventDescriptor` object will be | ||||||
|  |             added. | ||||||
|  |         :param is_directory: | ||||||
|  |             ``True`` if the path refers to a directory; ``False`` otherwise. | ||||||
|  |         :type is_directory: | ||||||
|  |             ``bool`` | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             path = absolute_path(path) | ||||||
|  |             if not self._has_path(path): | ||||||
|  |                 self._add_descriptor(KeventDescriptor(path, is_directory)) | ||||||
|  |  | ||||||
|  |     def remove(self, path): | ||||||
|  |         """ | ||||||
|  |         Removes the :class:`KeventDescriptor` object for the given path | ||||||
|  |         if it already exists. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path for which the :class:`KeventDescriptor` object will be | ||||||
|  |             removed. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             path = absolute_path(path) | ||||||
|  |             if self._has_path(path): | ||||||
|  |                 self._remove_descriptor(self._get(path)) | ||||||
|  |  | ||||||
|  |     def clear(self): | ||||||
|  |         """ | ||||||
|  |         Clears the collection and closes all open descriptors. | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             for descriptor in self._descriptors: | ||||||
|  |                 descriptor.close() | ||||||
|  |             self._descriptors.clear() | ||||||
|  |             self._descriptor_for_fd.clear() | ||||||
|  |             self._descriptor_for_path.clear() | ||||||
|  |             self._kevents = [] | ||||||
|  |  | ||||||
|  |     # Thread-unsafe methods. Locking is provided at a higher level. | ||||||
|  |     def _get(self, path): | ||||||
|  |         """Returns a kevent descriptor for a given path.""" | ||||||
|  |         return self._descriptor_for_path[path] | ||||||
|  |  | ||||||
|  |     def _has_path(self, path): | ||||||
|  |         """Determines whether a :class:`KeventDescriptor` for the specified | ||||||
|  |         path exists already in the collection.""" | ||||||
|  |         return path in self._descriptor_for_path | ||||||
|  |  | ||||||
|  |     def _add_descriptor(self, descriptor): | ||||||
|  |         """ | ||||||
|  |         Adds a descriptor to the collection. | ||||||
|  |  | ||||||
|  |         :param descriptor: | ||||||
|  |             An instance of :class:`KeventDescriptor` to be added. | ||||||
|  |         """ | ||||||
|  |         self._descriptors.add(descriptor) | ||||||
|  |         self._kevents.append(descriptor.kevent) | ||||||
|  |         self._descriptor_for_path[descriptor.path] = descriptor | ||||||
|  |         self._descriptor_for_fd[descriptor.fd] = descriptor | ||||||
|  |  | ||||||
|  |     def _remove_descriptor(self, descriptor): | ||||||
|  |         """ | ||||||
|  |         Removes a descriptor from the collection. | ||||||
|  |  | ||||||
|  |         :param descriptor: | ||||||
|  |             An instance of :class:`KeventDescriptor` to be removed. | ||||||
|  |         """ | ||||||
|  |         self._descriptors.remove(descriptor) | ||||||
|  |         del self._descriptor_for_fd[descriptor.fd] | ||||||
|  |         del self._descriptor_for_path[descriptor.path] | ||||||
|  |         self._kevents.remove(descriptor.kevent) | ||||||
|  |         descriptor.close() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class KeventDescriptor: | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     A kevent descriptor convenience data structure to keep together: | ||||||
|  |  | ||||||
|  |         * kevent | ||||||
|  |         * directory status | ||||||
|  |         * path | ||||||
|  |         * file descriptor | ||||||
|  |  | ||||||
|  |     :param path: | ||||||
|  |         Path string for which a kevent descriptor will be created. | ||||||
|  |     :param is_directory: | ||||||
|  |         ``True`` if the path refers to a directory; ``False`` otherwise. | ||||||
|  |     :type is_directory: | ||||||
|  |         ``bool`` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, path, is_directory): | ||||||
|  |         self._path = absolute_path(path) | ||||||
|  |         self._is_directory = is_directory | ||||||
|  |         self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS) | ||||||
|  |         self._kev = select.kevent( | ||||||
|  |             self._fd, | ||||||
|  |             filter=WATCHDOG_KQ_FILTER, | ||||||
|  |             flags=WATCHDOG_KQ_EV_FLAGS, | ||||||
|  |             fflags=WATCHDOG_KQ_FFLAGS, | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def fd(self): | ||||||
|  |         """OS file descriptor for the kevent descriptor.""" | ||||||
|  |         return self._fd | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def path(self): | ||||||
|  |         """The path associated with the kevent descriptor.""" | ||||||
|  |         return self._path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def kevent(self): | ||||||
|  |         """The kevent object associated with the kevent descriptor.""" | ||||||
|  |         return self._kev | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_directory(self): | ||||||
|  |         """Determines whether the kevent descriptor refers to a directory. | ||||||
|  |  | ||||||
|  |         :returns: | ||||||
|  |             ``True`` or ``False`` | ||||||
|  |         """ | ||||||
|  |         return self._is_directory | ||||||
|  |  | ||||||
|  |     def close(self): | ||||||
|  |         """ | ||||||
|  |         Closes the file descriptor associated with a kevent descriptor. | ||||||
|  |         """ | ||||||
|  |         try: | ||||||
|  |             os.close(self.fd) | ||||||
|  |         except OSError: | ||||||
|  |             pass | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def key(self): | ||||||
|  |         return (self.path, self.is_directory) | ||||||
|  |  | ||||||
|  |     def __eq__(self, descriptor): | ||||||
|  |         return self.key == descriptor.key | ||||||
|  |  | ||||||
|  |     def __ne__(self, descriptor): | ||||||
|  |         return self.key != descriptor.key | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return hash(self.key) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return f"<{type(self).__name__}: path={self.path!r}, is_directory={self.is_directory}>" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class KqueueEmitter(EventEmitter): | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     kqueue(2)-based event emitter. | ||||||
|  |  | ||||||
|  |     .. ADMONITION:: About ``kqueue(2)`` behavior and this implementation | ||||||
|  |  | ||||||
|  |               ``kqueue(2)`` monitors file system events only for | ||||||
|  |               open descriptors, which means this emitter does a lot of | ||||||
|  |               book-keeping behind the scenes to keep track of open | ||||||
|  |               descriptors for every entry in the monitored directory tree. | ||||||
|  |  | ||||||
|  |               This also means the number of maximum open file descriptors | ||||||
|  |               on your system must be increased **manually**. | ||||||
|  |               Usually, issuing a call to ``ulimit`` should suffice:: | ||||||
|  |  | ||||||
|  |                   ulimit -n 1024 | ||||||
|  |  | ||||||
|  |               Ensure that you pick a number that is larger than the | ||||||
|  |               number of files you expect to be monitored. | ||||||
|  |  | ||||||
|  |               ``kqueue(2)`` does not provide enough information about the | ||||||
|  |               following things: | ||||||
|  |  | ||||||
|  |               * The destination path of a file or directory that is renamed. | ||||||
|  |               * Creation of a file or directory within a directory; in this | ||||||
|  |                 case, ``kqueue(2)`` only indicates a modified event on the | ||||||
|  |                 parent directory. | ||||||
|  |  | ||||||
|  |               Therefore, this emitter takes a snapshot of the directory | ||||||
|  |               tree when ``kqueue(2)`` detects a change on the file system | ||||||
|  |               to be able to determine the above information. | ||||||
|  |  | ||||||
|  |     :param event_queue: | ||||||
|  |         The event queue to fill with events. | ||||||
|  |     :param watch: | ||||||
|  |         A watch object representing the directory to monitor. | ||||||
|  |     :type watch: | ||||||
|  |         :class:`libs.watchdog.observers.api.ObservedWatch` | ||||||
|  |     :param timeout: | ||||||
|  |         Read events blocking timeout (in seconds). | ||||||
|  |     :type timeout: | ||||||
|  |         ``float`` | ||||||
|  |     :param stat: stat function. See ``os.stat`` for details. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, stat=os.stat | ||||||
|  |     ): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |  | ||||||
|  |         self._kq = select.kqueue() | ||||||
|  |         self._lock = threading.RLock() | ||||||
|  |  | ||||||
|  |         # A collection of KeventDescriptor. | ||||||
|  |         self._descriptors = KeventDescriptorSet() | ||||||
|  |  | ||||||
|  |         def custom_stat(path, self=self): | ||||||
|  |             stat_info = stat(path) | ||||||
|  |             self._register_kevent(path, S_ISDIR(stat_info.st_mode)) | ||||||
|  |             return stat_info | ||||||
|  |  | ||||||
|  |         self._snapshot = DirectorySnapshot( | ||||||
|  |             watch.path, recursive=watch.is_recursive, stat=custom_stat | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     def _register_kevent(self, path, is_directory): | ||||||
|  |         """ | ||||||
|  |         Registers a kevent descriptor for the given path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path for which a kevent descriptor will be created. | ||||||
|  |         :param is_directory: | ||||||
|  |             ``True`` if the path refers to a directory; ``False`` otherwise. | ||||||
|  |         :type is_directory: | ||||||
|  |             ``bool`` | ||||||
|  |         """ | ||||||
|  |         try: | ||||||
|  |             self._descriptors.add(path, is_directory) | ||||||
|  |         except OSError as e: | ||||||
|  |             if e.errno == errno.ENOENT: | ||||||
|  |                 # Probably dealing with a temporary file that was created | ||||||
|  |                 # and then quickly deleted before we could open | ||||||
|  |                 # a descriptor for it. Therefore, simply queue a sequence | ||||||
|  |                 # of created and deleted events for the path. | ||||||
|  |                 # path = absolute_path(path) | ||||||
|  |                 # if is_directory: | ||||||
|  |                 #    self.queue_event(DirCreatedEvent(path)) | ||||||
|  |                 #    self.queue_event(DirDeletedEvent(path)) | ||||||
|  |                 # else: | ||||||
|  |                 #    self.queue_event(FileCreatedEvent(path)) | ||||||
|  |                 #    self.queue_event(FileDeletedEvent(path)) | ||||||
|  |  | ||||||
|  |                 # TODO: We could simply ignore these files. | ||||||
|  |                 # Locked files cause the Python process to die with | ||||||
|  |                 # a bus error when we handle temporary files, | ||||||
|  |                 # e.g. .git/index.lock when running tig operations. | ||||||
|  |                 # I don't fully understand this at the moment. | ||||||
|  |                 pass | ||||||
|  |             elif e.errno == errno.EOPNOTSUPP: | ||||||
|  |                 # Probably dealing with a socket or special file | ||||||
|  |                 # mounted through a file system that does not support | ||||||
|  |                 # access to it (e.g. NFS). On BSD systems look at | ||||||
|  |                 # EOPNOTSUPP in man 2 open. | ||||||
|  |                 pass | ||||||
|  |             else: | ||||||
|  |                 # All other errors are propagated. | ||||||
|  |                 raise | ||||||
|  |  | ||||||
|  |     def _unregister_kevent(self, path): | ||||||
|  |         """ | ||||||
|  |         Convenience function to close the kevent descriptor for a | ||||||
|  |         specified kqueue-monitored path. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             Path for which the kevent descriptor will be closed. | ||||||
|  |         """ | ||||||
|  |         self._descriptors.remove(path) | ||||||
|  |  | ||||||
|  |     def queue_event(self, event): | ||||||
|  |         """ | ||||||
|  |         Handles queueing a single event object. | ||||||
|  |  | ||||||
|  |         :param event: | ||||||
|  |             An instance of :class:`libs.watchdog.events.FileSystemEvent` | ||||||
|  |             or a subclass. | ||||||
|  |         """ | ||||||
|  |         # Handles all the book keeping for queued events. | ||||||
|  |         # We do not need to fire moved/deleted events for all subitems in | ||||||
|  |         # a directory tree here, because this function is called by kqueue | ||||||
|  |         # for all those events anyway. | ||||||
|  |         EventEmitter.queue_event(self, event) | ||||||
|  |         if event.event_type == EVENT_TYPE_CREATED: | ||||||
|  |             self._register_kevent(event.src_path, event.is_directory) | ||||||
|  |         elif event.event_type == EVENT_TYPE_MOVED: | ||||||
|  |             self._unregister_kevent(event.src_path) | ||||||
|  |             self._register_kevent(event.dest_path, event.is_directory) | ||||||
|  |         elif event.event_type == EVENT_TYPE_DELETED: | ||||||
|  |             self._unregister_kevent(event.src_path) | ||||||
|  |  | ||||||
|  |     def _gen_kqueue_events(self, kev, ref_snapshot, new_snapshot): | ||||||
|  |         """ | ||||||
|  |         Generate events from the kevent list returned from the call to | ||||||
|  |         :meth:`select.kqueue.control`. | ||||||
|  |  | ||||||
|  |         .. NOTE:: kqueue only tells us about deletions, file modifications, | ||||||
|  |                   and attribute modifications. The other events, namely, | ||||||
|  |                   file creation, directory modification, file rename, | ||||||
|  |                   directory rename, directory creation, etc. are | ||||||
|  |                   determined by comparing directory snapshots. | ||||||
|  |         """ | ||||||
|  |         descriptor = self._descriptors.get_for_fd(kev.ident) | ||||||
|  |         src_path = descriptor.path | ||||||
|  |  | ||||||
|  |         if is_renamed(kev): | ||||||
|  |             # Kqueue does not specify the destination names for renames, | ||||||
|  |             # so we have to process these using a snapshot of the | ||||||
|  |             # directory. | ||||||
|  |             for event in self._gen_renamed_events( | ||||||
|  |                 src_path, descriptor.is_directory, ref_snapshot, new_snapshot | ||||||
|  |             ): | ||||||
|  |                 yield event | ||||||
|  |         elif is_attrib_modified(kev): | ||||||
|  |             if descriptor.is_directory: | ||||||
|  |                 yield DirModifiedEvent(src_path) | ||||||
|  |             else: | ||||||
|  |                 yield FileModifiedEvent(src_path) | ||||||
|  |         elif is_modified(kev): | ||||||
|  |             if descriptor.is_directory: | ||||||
|  |                 if self.watch.is_recursive or self.watch.path == src_path: | ||||||
|  |                     # When a directory is modified, it may be due to | ||||||
|  |                     # sub-file/directory renames or new file/directory | ||||||
|  |                     # creation. We determine all this by comparing | ||||||
|  |                     # snapshots later. | ||||||
|  |                     yield DirModifiedEvent(src_path) | ||||||
|  |             else: | ||||||
|  |                 yield FileModifiedEvent(src_path) | ||||||
|  |         elif is_deleted(kev): | ||||||
|  |             if descriptor.is_directory: | ||||||
|  |                 yield DirDeletedEvent(src_path) | ||||||
|  |             else: | ||||||
|  |                 yield FileDeletedEvent(src_path) | ||||||
|  |  | ||||||
|  |     def _parent_dir_modified(self, src_path): | ||||||
|  |         """ | ||||||
|  |         Helper to generate a DirModifiedEvent on the parent of src_path. | ||||||
|  |         """ | ||||||
|  |         return DirModifiedEvent(os.path.dirname(src_path)) | ||||||
|  |  | ||||||
|  |     def _gen_renamed_events(self, src_path, is_directory, ref_snapshot, new_snapshot): | ||||||
|  |         """ | ||||||
|  |         Compares information from two directory snapshots (one taken before | ||||||
|  |         the rename operation and another taken right after) to determine the | ||||||
|  |         destination path of the file system object renamed, and yields | ||||||
|  |         the appropriate events to be queued. | ||||||
|  |         """ | ||||||
|  |         try: | ||||||
|  |             f_inode = ref_snapshot.inode(src_path) | ||||||
|  |         except KeyError: | ||||||
|  |             # Probably caught a temporary file/directory that was renamed | ||||||
|  |             # and deleted. Fires a sequence of created and deleted events | ||||||
|  |             # for the path. | ||||||
|  |             if is_directory: | ||||||
|  |                 yield DirCreatedEvent(src_path) | ||||||
|  |                 yield DirDeletedEvent(src_path) | ||||||
|  |             else: | ||||||
|  |                 yield FileCreatedEvent(src_path) | ||||||
|  |                 yield FileDeletedEvent(src_path) | ||||||
|  |             # We don't process any further and bail out assuming | ||||||
|  |             # the event represents deletion/creation instead of movement. | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         dest_path = new_snapshot.path(f_inode) | ||||||
|  |         if dest_path is not None: | ||||||
|  |             dest_path = absolute_path(dest_path) | ||||||
|  |             if is_directory: | ||||||
|  |                 event = DirMovedEvent(src_path, dest_path) | ||||||
|  |                 yield event | ||||||
|  |             else: | ||||||
|  |                 yield FileMovedEvent(src_path, dest_path) | ||||||
|  |             yield self._parent_dir_modified(src_path) | ||||||
|  |             yield self._parent_dir_modified(dest_path) | ||||||
|  |             if is_directory: | ||||||
|  |                 # TODO: Do we need to fire moved events for the items | ||||||
|  |                 # inside the directory tree? Does kqueue do this | ||||||
|  |                 # all by itself? Check this and then enable this code | ||||||
|  |                 # only if it doesn't already. | ||||||
|  |                 # A: It doesn't. So I've enabled this block. | ||||||
|  |                 if self.watch.is_recursive: | ||||||
|  |                     for sub_event in generate_sub_moved_events(src_path, dest_path): | ||||||
|  |                         yield sub_event | ||||||
|  |         else: | ||||||
|  |             # If the new snapshot does not have an inode for the | ||||||
|  |             # old path, we haven't found the new name. Therefore, | ||||||
|  |             # we mark it as deleted and unregister the path. | ||||||
|  |             if is_directory: | ||||||
|  |                 yield DirDeletedEvent(src_path) | ||||||
|  |             else: | ||||||
|  |                 yield FileDeletedEvent(src_path) | ||||||
|  |             yield self._parent_dir_modified(src_path) | ||||||
|  |  | ||||||
|  |     def _read_events(self, timeout=None): | ||||||
|  |         """ | ||||||
|  |         Reads events from a call to the blocking | ||||||
|  |         :meth:`select.kqueue.control()` method. | ||||||
|  |  | ||||||
|  |         :param timeout: | ||||||
|  |             Blocking timeout for reading events. | ||||||
|  |         :type timeout: | ||||||
|  |             ``float`` (seconds) | ||||||
|  |         """ | ||||||
|  |         return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout) | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout): | ||||||
|  |         """ | ||||||
|  |         Queues events by reading them from a call to the blocking | ||||||
|  |         :meth:`select.kqueue.control()` method. | ||||||
|  |  | ||||||
|  |         :param timeout: | ||||||
|  |             Blocking timeout for reading events. | ||||||
|  |         :type timeout: | ||||||
|  |             ``float`` (seconds) | ||||||
|  |         """ | ||||||
|  |         with self._lock: | ||||||
|  |             try: | ||||||
|  |                 event_list = self._read_events(timeout) | ||||||
|  |                 # TODO: investigate why order appears to be reversed | ||||||
|  |                 event_list.reverse() | ||||||
|  |  | ||||||
|  |                 # Take a fresh snapshot of the directory and update the | ||||||
|  |                 # saved snapshot. | ||||||
|  |                 new_snapshot = DirectorySnapshot( | ||||||
|  |                     self.watch.path, self.watch.is_recursive | ||||||
|  |                 ) | ||||||
|  |                 ref_snapshot = self._snapshot | ||||||
|  |                 self._snapshot = new_snapshot | ||||||
|  |                 diff_events = new_snapshot - ref_snapshot | ||||||
|  |  | ||||||
|  |                 # Process events | ||||||
|  |                 for directory_created in diff_events.dirs_created: | ||||||
|  |                     self.queue_event(DirCreatedEvent(directory_created)) | ||||||
|  |                 for file_created in diff_events.files_created: | ||||||
|  |                     self.queue_event(FileCreatedEvent(file_created)) | ||||||
|  |                 for file_modified in diff_events.files_modified: | ||||||
|  |                     self.queue_event(FileModifiedEvent(file_modified)) | ||||||
|  |  | ||||||
|  |                 for kev in event_list: | ||||||
|  |                     for event in self._gen_kqueue_events( | ||||||
|  |                         kev, ref_snapshot, new_snapshot | ||||||
|  |                     ): | ||||||
|  |                         self.queue_event(event) | ||||||
|  |  | ||||||
|  |             except OSError as e: | ||||||
|  |                 if e.errno != errno.EBADF: | ||||||
|  |                     raise | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         # Clean up. | ||||||
|  |         with self._lock: | ||||||
|  |             self._descriptors.clear() | ||||||
|  |             self._kq.close() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class KqueueObserver(BaseObserver): | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |     Observer thread that schedules watching directories and dispatches | ||||||
|  |     calls to event handlers. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(emitter_class=KqueueEmitter, timeout=timeout) | ||||||
							
								
								
									
145 src/libs/watchdog/observers/polling.py Normal file
							| @@ -0,0 +1,145 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.observers.polling | ||||||
|  | :synopsis: Polling emitter implementation. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ------- | ||||||
|  | .. autoclass:: PollingObserver | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: PollingObserverVFS | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |    :special-members: | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import os | ||||||
|  | import threading | ||||||
|  | from functools import partial | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  | from libs.watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class PollingEmitter(EventEmitter): | ||||||
|  |     """ | ||||||
|  |     Platform-independent emitter that polls a directory to detect file | ||||||
|  |     system changes. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         event_queue, | ||||||
|  |         watch, | ||||||
|  |         timeout=DEFAULT_EMITTER_TIMEOUT, | ||||||
|  |         stat=os.stat, | ||||||
|  |         listdir=os.scandir, | ||||||
|  |     ): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |         self._snapshot = None | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |         self._take_snapshot = lambda: DirectorySnapshot( | ||||||
|  |             self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     def on_thread_start(self): | ||||||
|  |         self._snapshot = self._take_snapshot() | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout): | ||||||
|  |         # We don't want to hit the disk continuously. | ||||||
|  |         # timeout behaves like an interval for polling emitters. | ||||||
|  |         if self.stopped_event.wait(timeout): | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         with self._lock: | ||||||
|  |             if not self.should_keep_running(): | ||||||
|  |                 return | ||||||
|  |  | ||||||
|  |             # Get event diff between fresh snapshot and previous snapshot. | ||||||
|  |             # Update snapshot. | ||||||
|  |             try: | ||||||
|  |                 new_snapshot = self._take_snapshot() | ||||||
|  |             except OSError: | ||||||
|  |                 self.queue_event(DirDeletedEvent(self.watch.path)) | ||||||
|  |                 self.stop() | ||||||
|  |                 return | ||||||
|  |  | ||||||
|  |             events = DirectorySnapshotDiff(self._snapshot, new_snapshot) | ||||||
|  |             self._snapshot = new_snapshot | ||||||
|  |  | ||||||
|  |             # Files. | ||||||
|  |             for src_path in events.files_deleted: | ||||||
|  |                 self.queue_event(FileDeletedEvent(src_path)) | ||||||
|  |             for src_path in events.files_modified: | ||||||
|  |                 self.queue_event(FileModifiedEvent(src_path)) | ||||||
|  |             for src_path in events.files_created: | ||||||
|  |                 self.queue_event(FileCreatedEvent(src_path)) | ||||||
|  |             for src_path, dest_path in events.files_moved: | ||||||
|  |                 self.queue_event(FileMovedEvent(src_path, dest_path)) | ||||||
|  |  | ||||||
|  |             # Directories. | ||||||
|  |             for src_path in events.dirs_deleted: | ||||||
|  |                 self.queue_event(DirDeletedEvent(src_path)) | ||||||
|  |             for src_path in events.dirs_modified: | ||||||
|  |                 self.queue_event(DirModifiedEvent(src_path)) | ||||||
|  |             for src_path in events.dirs_created: | ||||||
|  |                 self.queue_event(DirCreatedEvent(src_path)) | ||||||
|  |             for src_path, dest_path in events.dirs_moved: | ||||||
|  |                 self.queue_event(DirMovedEvent(src_path, dest_path)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class PollingObserver(BaseObserver): | ||||||
|  |     """ | ||||||
|  |     Platform-independent observer that polls a directory to detect file | ||||||
|  |     system changes. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(emitter_class=PollingEmitter, timeout=timeout) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class PollingObserverVFS(BaseObserver): | ||||||
|  |     """ | ||||||
|  |     File system independent observer that polls a directory to detect changes. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, stat, listdir, polling_interval=1): | ||||||
|  |         """ | ||||||
|  |         :param stat: stat function. See ``os.stat`` for details. | ||||||
|  |         :param listdir: listdir function. See ``os.scandir`` for details. | ||||||
|  |         :type polling_interval: float | ||||||
|  |         :param polling_interval: interval in seconds between polling the file system. | ||||||
|  |         """ | ||||||
|  |         emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir) | ||||||
|  |         super().__init__(emitter_class=emitter_cls, timeout=polling_interval) | ||||||
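A quick sketch of the VFS variant defined above: PollingObserverVFS takes the stat and listdir callables explicitly, so any pair of functions mimicking os.stat/os.scandir (for example, ones backed by a remote filesystem client) can be polled. This is illustration only, not part of the commit; the standard os functions and a 5-second interval are used as stand-ins:

    import os

    from libs.watchdog.observers.polling import PollingObserverVFS

    # Poll every 5 seconds using the regular local-filesystem callables.
    observer = PollingObserverVFS(stat=os.stat, listdir=os.scandir, polling_interval=5)
    # schedule()/start()/stop() then behave exactly as with PollingObserver.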
							
								
								
									
142 src/libs/watchdog/observers/read_directory_changes.py Normal file
							| @@ -0,0 +1,142 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # Copyright 2014 Thomas Amland | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import os.path | ||||||
|  | import platform | ||||||
|  | import sys | ||||||
|  | import threading | ||||||
|  | import time | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import ( | ||||||
|  |     DirCreatedEvent, | ||||||
|  |     DirDeletedEvent, | ||||||
|  |     DirModifiedEvent, | ||||||
|  |     DirMovedEvent, | ||||||
|  |     FileCreatedEvent, | ||||||
|  |     FileDeletedEvent, | ||||||
|  |     FileModifiedEvent, | ||||||
|  |     FileMovedEvent, | ||||||
|  |     generate_sub_created_events, | ||||||
|  |     generate_sub_moved_events, | ||||||
|  | ) | ||||||
|  | from libs.watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter | ||||||
|  |  | ||||||
|  | assert sys.platform.startswith("win"), f"{__name__} requires Windows" | ||||||
|  |  | ||||||
|  | from libs.watchdog.observers.winapi import close_directory_handle, get_directory_handle, read_events  # noqa: E402 | ||||||
|  |  | ||||||
|  | # HACK: | ||||||
|  | WATCHDOG_TRAVERSE_MOVED_DIR_DELAY = 1  # seconds | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class WindowsApiEmitter(EventEmitter): | ||||||
|  |     """ | ||||||
|  |     Windows API-based emitter that uses ReadDirectoryChangesW | ||||||
|  |     to detect file system changes for a watch. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): | ||||||
|  |         super().__init__(event_queue, watch, timeout) | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |         self._handle = None | ||||||
|  |  | ||||||
|  |     def on_thread_start(self): | ||||||
|  |         self._handle = get_directory_handle(self.watch.path) | ||||||
|  |  | ||||||
|  |     if platform.python_implementation() == "PyPy": | ||||||
|  |  | ||||||
|  |         def start(self): | ||||||
|  |             """PyPy needs some time before receiving events, see #792.""" | ||||||
|  |             super().start() | ||||||
|  |             time.sleep(0.01) | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         if self._handle: | ||||||
|  |             close_directory_handle(self._handle) | ||||||
|  |  | ||||||
|  |     def _read_events(self): | ||||||
|  |         return read_events(self._handle, self.watch.path, self.watch.is_recursive) | ||||||
|  |  | ||||||
|  |     def queue_events(self, timeout): | ||||||
|  |         winapi_events = self._read_events() | ||||||
|  |         with self._lock: | ||||||
|  |             last_renamed_src_path = "" | ||||||
|  |             for winapi_event in winapi_events: | ||||||
|  |                 src_path = os.path.join(self.watch.path, winapi_event.src_path) | ||||||
|  |  | ||||||
|  |                 if winapi_event.is_renamed_old: | ||||||
|  |                     last_renamed_src_path = src_path | ||||||
|  |                 elif winapi_event.is_renamed_new: | ||||||
|  |                     dest_path = src_path | ||||||
|  |                     src_path = last_renamed_src_path | ||||||
|  |                     if os.path.isdir(dest_path): | ||||||
|  |                         event = DirMovedEvent(src_path, dest_path) | ||||||
|  |                         if self.watch.is_recursive: | ||||||
|  |                             # HACK: We introduce a forced delay before | ||||||
|  |                             # traversing the moved directory. This will read | ||||||
|  |                             # only file movement that finishes within this | ||||||
|  |                             # delay time. | ||||||
|  |                             time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) | ||||||
|  |                             # The following block of code may not | ||||||
|  |                             # obtain moved events for the entire tree if | ||||||
|  |                             # the I/O is not completed within the above | ||||||
|  |                             # delay time. So, it's not guaranteed to work. | ||||||
|  |                             # TODO: Come up with a better solution, possibly | ||||||
|  |                             # a way to wait for I/O to complete before | ||||||
|  |                             # queuing events. | ||||||
|  |                             for sub_moved_event in generate_sub_moved_events( | ||||||
|  |                                 src_path, dest_path | ||||||
|  |                             ): | ||||||
|  |                                 self.queue_event(sub_moved_event) | ||||||
|  |                         self.queue_event(event) | ||||||
|  |                     else: | ||||||
|  |                         self.queue_event(FileMovedEvent(src_path, dest_path)) | ||||||
|  |                 elif winapi_event.is_modified: | ||||||
|  |                     cls = ( | ||||||
|  |                         DirModifiedEvent | ||||||
|  |                         if os.path.isdir(src_path) | ||||||
|  |                         else FileModifiedEvent | ||||||
|  |                     ) | ||||||
|  |                     self.queue_event(cls(src_path)) | ||||||
|  |                 elif winapi_event.is_added: | ||||||
|  |                     isdir = os.path.isdir(src_path) | ||||||
|  |                     cls = DirCreatedEvent if isdir else FileCreatedEvent | ||||||
|  |                     self.queue_event(cls(src_path)) | ||||||
|  |                     if isdir and self.watch.is_recursive: | ||||||
|  |                         # If a directory is moved from outside the watched folder to inside it, | ||||||
|  |                         # we only get a created directory event for it, not any events for its children, | ||||||
|  |                         # so use the same hack as for file moves to get the child events. | ||||||
|  |                         time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) | ||||||
|  |                         sub_events = generate_sub_created_events(src_path) | ||||||
|  |                         for sub_created_event in sub_events: | ||||||
|  |                             self.queue_event(sub_created_event) | ||||||
|  |                 elif winapi_event.is_removed: | ||||||
|  |                     self.queue_event(FileDeletedEvent(src_path)) | ||||||
|  |                 elif winapi_event.is_removed_self: | ||||||
|  |                     self.queue_event(DirDeletedEvent(self.watch.path)) | ||||||
|  |                     self.stop() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class WindowsApiObserver(BaseObserver): | ||||||
|  |     """ | ||||||
|  |     Observer thread that schedules watching directories and dispatches | ||||||
|  |     calls to event handlers. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): | ||||||
|  |         super().__init__(emitter_class=WindowsApiEmitter, timeout=timeout) | ||||||
							
								
								
									
416 src/libs/watchdog/observers/winapi.py Normal file
							| @@ -0,0 +1,416 @@ | |||||||
|  | # winapi.py: Windows API-Python interface (removes dependency on pywin32) | ||||||
|  | # | ||||||
|  | # Copyright (C) 2007 Thomas Heller <theller@ctypes.org> | ||||||
|  | # Copyright (C) 2010 Will McGugan <will@willmcgugan.com> | ||||||
|  | # Copyright (C) 2010 Ryan Kelly <ryan@rfk.id.au> | ||||||
|  | # Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright (C) 2014 Thomas Amland | ||||||
|  | # All rights reserved. | ||||||
|  | # | ||||||
|  | # Redistribution and use in source and binary forms, with or without | ||||||
|  | # modification, are permitted provided that the following conditions are met: | ||||||
|  | # | ||||||
|  | # * Redistributions of source code must retain the above copyright notice, this | ||||||
|  | #   list of conditions and the following disclaimer. | ||||||
|  | # * Redistributions in binary form must reproduce the above copyright notice, | ||||||
|  | #   this list of conditions and the following disclaimer in the documentation | ||||||
|  | #   and / or other materials provided with the distribution. | ||||||
|  | # * Neither the name of the organization nor the names of its contributors may | ||||||
|  | #   be used to endorse or promote products derived from this software without | ||||||
|  | #   specific prior written permission. | ||||||
|  | # | ||||||
|  | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||||||
|  | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||||||
|  | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||||||
|  | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE | ||||||
|  | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||||||
|  | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||||||
|  | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||||||
|  | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||||||
|  | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||||||
|  | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||||||
|  | # POSSIBILITY OF SUCH DAMAGE. | ||||||
|  | # | ||||||
|  | # Portions of this code were taken from pyfilesystem, which uses the above | ||||||
|  | # new BSD license. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | from functools import reduce | ||||||
|  |  | ||||||
|  | assert sys.platform.startswith("win"), f"{__name__} requires Windows" | ||||||
|  | import ctypes.wintypes  # noqa: E402 | ||||||
|  |  | ||||||
|  | LPVOID = ctypes.wintypes.LPVOID | ||||||
|  |  | ||||||
|  | # Invalid handle value. | ||||||
|  | INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value | ||||||
|  |  | ||||||
|  | # File notification constants. | ||||||
|  | FILE_NOTIFY_CHANGE_FILE_NAME = 0x01 | ||||||
|  | FILE_NOTIFY_CHANGE_DIR_NAME = 0x02 | ||||||
|  | FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04 | ||||||
|  | FILE_NOTIFY_CHANGE_SIZE = 0x08 | ||||||
|  | FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 | ||||||
|  | FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 | ||||||
|  | FILE_NOTIFY_CHANGE_CREATION = 0x040 | ||||||
|  | FILE_NOTIFY_CHANGE_SECURITY = 0x0100 | ||||||
|  |  | ||||||
|  | FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 | ||||||
|  | FILE_FLAG_OVERLAPPED = 0x40000000 | ||||||
|  | FILE_LIST_DIRECTORY = 1 | ||||||
|  | FILE_SHARE_READ = 0x01 | ||||||
|  | FILE_SHARE_WRITE = 0x02 | ||||||
|  | FILE_SHARE_DELETE = 0x04 | ||||||
|  | OPEN_EXISTING = 3 | ||||||
|  |  | ||||||
|  | VOLUME_NAME_NT = 0x02 | ||||||
|  |  | ||||||
|  | # File action constants. | ||||||
|  | FILE_ACTION_CREATED = 1 | ||||||
|  | FILE_ACTION_DELETED = 2 | ||||||
|  | FILE_ACTION_MODIFIED = 3 | ||||||
|  | FILE_ACTION_RENAMED_OLD_NAME = 4 | ||||||
|  | FILE_ACTION_RENAMED_NEW_NAME = 5 | ||||||
|  | FILE_ACTION_DELETED_SELF = 0xFFFE | ||||||
|  | FILE_ACTION_OVERFLOW = 0xFFFF | ||||||
|  |  | ||||||
|  | # Aliases | ||||||
|  | FILE_ACTION_ADDED = FILE_ACTION_CREATED | ||||||
|  | FILE_ACTION_REMOVED = FILE_ACTION_DELETED | ||||||
|  | FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF | ||||||
|  |  | ||||||
|  | THREAD_TERMINATE = 0x0001 | ||||||
|  |  | ||||||
|  | # IO waiting constants. | ||||||
|  | WAIT_ABANDONED = 0x00000080 | ||||||
|  | WAIT_IO_COMPLETION = 0x000000C0 | ||||||
|  | WAIT_OBJECT_0 = 0x00000000 | ||||||
|  | WAIT_TIMEOUT = 0x00000102 | ||||||
|  |  | ||||||
|  | # Error codes | ||||||
|  | ERROR_OPERATION_ABORTED = 995 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class OVERLAPPED(ctypes.Structure): | ||||||
|  |     _fields_ = [ | ||||||
|  |         ("Internal", LPVOID), | ||||||
|  |         ("InternalHigh", LPVOID), | ||||||
|  |         ("Offset", ctypes.wintypes.DWORD), | ||||||
|  |         ("OffsetHigh", ctypes.wintypes.DWORD), | ||||||
|  |         ("Pointer", LPVOID), | ||||||
|  |         ("hEvent", ctypes.wintypes.HANDLE), | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _errcheck_bool(value, func, args): | ||||||
|  |     if not value: | ||||||
|  |         raise ctypes.WinError() | ||||||
|  |     return args | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _errcheck_handle(value, func, args): | ||||||
|  |     if not value: | ||||||
|  |         raise ctypes.WinError() | ||||||
|  |     if value == INVALID_HANDLE_VALUE: | ||||||
|  |         raise ctypes.WinError() | ||||||
|  |     return args | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _errcheck_dword(value, func, args): | ||||||
|  |     if value == 0xFFFFFFFF: | ||||||
|  |         raise ctypes.WinError() | ||||||
|  |     return args | ||||||
|  |  | ||||||
|  |  | ||||||
|  | kernel32 = ctypes.WinDLL("kernel32") | ||||||
|  |  | ||||||
|  | ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW | ||||||
|  | ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL | ||||||
|  | ReadDirectoryChangesW.errcheck = _errcheck_bool | ||||||
|  | ReadDirectoryChangesW.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # hDirectory | ||||||
|  |     LPVOID,  # lpBuffer | ||||||
|  |     ctypes.wintypes.DWORD,  # nBufferLength | ||||||
|  |     ctypes.wintypes.BOOL,  # bWatchSubtree | ||||||
|  |     ctypes.wintypes.DWORD,  # dwNotifyFilter | ||||||
|  |     ctypes.POINTER(ctypes.wintypes.DWORD),  # lpBytesReturned | ||||||
|  |     ctypes.POINTER(OVERLAPPED),  # lpOverlapped | ||||||
|  |     LPVOID,  # FileIOCompletionRoutine # lpCompletionRoutine | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | CreateFileW = kernel32.CreateFileW | ||||||
|  | CreateFileW.restype = ctypes.wintypes.HANDLE | ||||||
|  | CreateFileW.errcheck = _errcheck_handle | ||||||
|  | CreateFileW.argtypes = ( | ||||||
|  |     ctypes.wintypes.LPCWSTR,  # lpFileName | ||||||
|  |     ctypes.wintypes.DWORD,  # dwDesiredAccess | ||||||
|  |     ctypes.wintypes.DWORD,  # dwShareMode | ||||||
|  |     LPVOID,  # lpSecurityAttributes | ||||||
|  |     ctypes.wintypes.DWORD,  # dwCreationDisposition | ||||||
|  |     ctypes.wintypes.DWORD,  # dwFlagsAndAttributes | ||||||
|  |     ctypes.wintypes.HANDLE,  # hTemplateFile | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | CloseHandle = kernel32.CloseHandle | ||||||
|  | CloseHandle.restype = ctypes.wintypes.BOOL | ||||||
|  | CloseHandle.argtypes = (ctypes.wintypes.HANDLE,)  # hObject | ||||||
|  |  | ||||||
|  | CancelIoEx = kernel32.CancelIoEx | ||||||
|  | CancelIoEx.restype = ctypes.wintypes.BOOL | ||||||
|  | CancelIoEx.errcheck = _errcheck_bool | ||||||
|  | CancelIoEx.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # hObject | ||||||
|  |     ctypes.POINTER(OVERLAPPED),  # lpOverlapped | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | CreateEvent = kernel32.CreateEventW | ||||||
|  | CreateEvent.restype = ctypes.wintypes.HANDLE | ||||||
|  | CreateEvent.errcheck = _errcheck_handle | ||||||
|  | CreateEvent.argtypes = ( | ||||||
|  |     LPVOID,  # lpEventAttributes | ||||||
|  |     ctypes.wintypes.BOOL,  # bManualReset | ||||||
|  |     ctypes.wintypes.BOOL,  # bInitialState | ||||||
|  |     ctypes.wintypes.LPCWSTR,  # lpName | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | SetEvent = kernel32.SetEvent | ||||||
|  | SetEvent.restype = ctypes.wintypes.BOOL | ||||||
|  | SetEvent.errcheck = _errcheck_bool | ||||||
|  | SetEvent.argtypes = (ctypes.wintypes.HANDLE,)  # hEvent | ||||||
|  |  | ||||||
|  | WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx | ||||||
|  | WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD | ||||||
|  | WaitForSingleObjectEx.errcheck = _errcheck_dword | ||||||
|  | WaitForSingleObjectEx.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # hObject | ||||||
|  |     ctypes.wintypes.DWORD,  # dwMilliseconds | ||||||
|  |     ctypes.wintypes.BOOL,  # bAlertable | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | CreateIoCompletionPort = kernel32.CreateIoCompletionPort | ||||||
|  | CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE | ||||||
|  | CreateIoCompletionPort.errcheck = _errcheck_handle | ||||||
|  | CreateIoCompletionPort.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # FileHandle | ||||||
|  |     ctypes.wintypes.HANDLE,  # ExistingCompletionPort | ||||||
|  |     LPVOID,  # CompletionKey | ||||||
|  |     ctypes.wintypes.DWORD,  # NumberOfConcurrentThreads | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus | ||||||
|  | GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL | ||||||
|  | GetQueuedCompletionStatus.errcheck = _errcheck_bool | ||||||
|  | GetQueuedCompletionStatus.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # CompletionPort | ||||||
|  |     LPVOID,  # lpNumberOfBytesTransferred | ||||||
|  |     LPVOID,  # lpCompletionKey | ||||||
|  |     ctypes.POINTER(OVERLAPPED),  # lpOverlapped | ||||||
|  |     ctypes.wintypes.DWORD,  # dwMilliseconds | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus | ||||||
|  | PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL | ||||||
|  | PostQueuedCompletionStatus.errcheck = _errcheck_bool | ||||||
|  | PostQueuedCompletionStatus.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # CompletionPort | ||||||
|  |     ctypes.wintypes.DWORD,  # lpNumberOfBytesTransferred | ||||||
|  |     ctypes.wintypes.DWORD,  # lpCompletionKey | ||||||
|  |     ctypes.POINTER(OVERLAPPED),  # lpOverlapped | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW | ||||||
|  | GetFinalPathNameByHandleW.restype = ctypes.wintypes.DWORD | ||||||
|  | GetFinalPathNameByHandleW.errcheck = _errcheck_dword | ||||||
|  | GetFinalPathNameByHandleW.argtypes = ( | ||||||
|  |     ctypes.wintypes.HANDLE,  # hFile | ||||||
|  |     ctypes.wintypes.LPWSTR,  # lpszFilePath | ||||||
|  |     ctypes.wintypes.DWORD,  # cchFilePath | ||||||
|  |     ctypes.wintypes.DWORD,  # DWORD | ||||||
|  | ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class FILE_NOTIFY_INFORMATION(ctypes.Structure): | ||||||
|  |     _fields_ = [ | ||||||
|  |         ("NextEntryOffset", ctypes.wintypes.DWORD), | ||||||
|  |         ("Action", ctypes.wintypes.DWORD), | ||||||
|  |         ("FileNameLength", ctypes.wintypes.DWORD), | ||||||
|  |         # ("FileName", (ctypes.wintypes.WCHAR * 1))] | ||||||
|  |         ("FileName", (ctypes.c_char * 1)), | ||||||
|  |     ] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # We don't need to recalculate these flags every time a call is made to | ||||||
|  | # the win32 API functions. | ||||||
|  | WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS | ||||||
|  | WATCHDOG_FILE_SHARE_FLAGS = reduce( | ||||||
|  |     lambda x, y: x | y, | ||||||
|  |     [ | ||||||
|  |         FILE_SHARE_READ, | ||||||
|  |         FILE_SHARE_WRITE, | ||||||
|  |         FILE_SHARE_DELETE, | ||||||
|  |     ], | ||||||
|  | ) | ||||||
|  | WATCHDOG_FILE_NOTIFY_FLAGS = reduce( | ||||||
|  |     lambda x, y: x | y, | ||||||
|  |     [ | ||||||
|  |         FILE_NOTIFY_CHANGE_FILE_NAME, | ||||||
|  |         FILE_NOTIFY_CHANGE_DIR_NAME, | ||||||
|  |         FILE_NOTIFY_CHANGE_ATTRIBUTES, | ||||||
|  |         FILE_NOTIFY_CHANGE_SIZE, | ||||||
|  |         FILE_NOTIFY_CHANGE_LAST_WRITE, | ||||||
|  |         FILE_NOTIFY_CHANGE_SECURITY, | ||||||
|  |         FILE_NOTIFY_CHANGE_LAST_ACCESS, | ||||||
|  |         FILE_NOTIFY_CHANGE_CREATION, | ||||||
|  |     ], | ||||||
|  | ) | ||||||
|  |  | ||||||
|  | # ReadDirectoryChangesW buffer length. | ||||||
|  | # To handle cases with a lot of changes, this seems to be the highest safe value we can use. | ||||||
|  | # Note: it will fail with ERROR_INVALID_PARAMETER when it is greater than 64 KB and | ||||||
|  | #       the application is monitoring a directory over the network. | ||||||
|  | #       This is due to a packet size limitation with the underlying file sharing protocols. | ||||||
|  | #       https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks | ||||||
|  | BUFFER_SIZE = 64000 | ||||||
|  |  | ||||||
|  | # Buffer length for path-related stuff. | ||||||
|  | # Introduced to keep the old behavior when we bumped BUFFER_SIZE from 2048 to 64000 in v1.0.0. | ||||||
|  | PATH_BUFFER_SIZE = 2048 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _parse_event_buffer(readBuffer, nBytes): | ||||||
|  |     results = [] | ||||||
|  |     while nBytes > 0: | ||||||
|  |         fni = ctypes.cast(readBuffer, LPFNI)[0] | ||||||
|  |         ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset | ||||||
|  |         # filename = ctypes.wstring_at(ptr, fni.FileNameLength) | ||||||
|  |         filename = ctypes.string_at(ptr, fni.FileNameLength) | ||||||
|  |         results.append((fni.Action, filename.decode("utf-16"))) | ||||||
|  |         numToSkip = fni.NextEntryOffset | ||||||
|  |         if numToSkip <= 0: | ||||||
|  |             break | ||||||
|  |         readBuffer = readBuffer[numToSkip:] | ||||||
|  |         nBytes -= numToSkip  # numToSkip is long. nBytes should be long too. | ||||||
|  |     return results | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _is_observed_path_deleted(handle, path): | ||||||
|  |     # Compare the observed path with the actual path returned by | ||||||
|  |     # GetFinalPathNameByHandleW. If the directory was moved to the trash bin | ||||||
|  |     # or deleted, the actual path will no longer match the observed path. | ||||||
|  |     buff = ctypes.create_unicode_buffer(PATH_BUFFER_SIZE) | ||||||
|  |     GetFinalPathNameByHandleW(handle, buff, PATH_BUFFER_SIZE, VOLUME_NAME_NT) | ||||||
|  |     return buff.value != path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _generate_observed_path_deleted_event(): | ||||||
|  |     # Create a synthetic event to notify that the observed directory was deleted. | ||||||
|  |     path = ctypes.create_unicode_buffer(".") | ||||||
|  |     event = FILE_NOTIFY_INFORMATION( | ||||||
|  |         0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8") | ||||||
|  |     ) | ||||||
|  |     event_size = ctypes.sizeof(event) | ||||||
|  |     buff = ctypes.create_string_buffer(PATH_BUFFER_SIZE) | ||||||
|  |     ctypes.memmove(buff, ctypes.addressof(event), event_size) | ||||||
|  |     return buff, event_size | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def get_directory_handle(path): | ||||||
|  |     """Returns a Windows handle to the specified directory path.""" | ||||||
|  |     return CreateFileW( | ||||||
|  |         path, | ||||||
|  |         FILE_LIST_DIRECTORY, | ||||||
|  |         WATCHDOG_FILE_SHARE_FLAGS, | ||||||
|  |         None, | ||||||
|  |         OPEN_EXISTING, | ||||||
|  |         WATCHDOG_FILE_FLAGS, | ||||||
|  |         None, | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def close_directory_handle(handle): | ||||||
|  |     try: | ||||||
|  |         CancelIoEx(handle, None)  # force ReadDirectoryChangesW to return | ||||||
|  |         CloseHandle(handle)  # close directory handle | ||||||
|  |     except OSError: | ||||||
|  |         try: | ||||||
|  |             CloseHandle(handle)  # close directory handle | ||||||
|  |         except Exception: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def read_directory_changes(handle, path, recursive): | ||||||
|  |     """Read changes to the directory using the specified directory handle. | ||||||
|  |  | ||||||
|  |     http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html | ||||||
|  |     """ | ||||||
|  |     event_buffer = ctypes.create_string_buffer(BUFFER_SIZE) | ||||||
|  |     nbytes = ctypes.wintypes.DWORD() | ||||||
|  |     try: | ||||||
|  |         ReadDirectoryChangesW( | ||||||
|  |             handle, | ||||||
|  |             ctypes.byref(event_buffer), | ||||||
|  |             len(event_buffer), | ||||||
|  |             recursive, | ||||||
|  |             WATCHDOG_FILE_NOTIFY_FLAGS, | ||||||
|  |             ctypes.byref(nbytes), | ||||||
|  |             None, | ||||||
|  |             None, | ||||||
|  |         ) | ||||||
|  |     except OSError as e: | ||||||
|  |         if e.winerror == ERROR_OPERATION_ABORTED: | ||||||
|  |             return [], 0 | ||||||
|  |  | ||||||
|  |         # Handle the case when the root path is deleted | ||||||
|  |         if _is_observed_path_deleted(handle, path): | ||||||
|  |             return _generate_observed_path_deleted_event() | ||||||
|  |  | ||||||
|  |         raise e | ||||||
|  |  | ||||||
|  |     return event_buffer.raw, int(nbytes.value) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class WinAPINativeEvent: | ||||||
|  |     def __init__(self, action, src_path): | ||||||
|  |         self.action = action | ||||||
|  |         self.src_path = src_path | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_added(self): | ||||||
|  |         return self.action == FILE_ACTION_CREATED | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_removed(self): | ||||||
|  |         return self.action == FILE_ACTION_REMOVED | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_modified(self): | ||||||
|  |         return self.action == FILE_ACTION_MODIFIED | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_renamed_old(self): | ||||||
|  |         return self.action == FILE_ACTION_RENAMED_OLD_NAME | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_renamed_new(self): | ||||||
|  |         return self.action == FILE_ACTION_RENAMED_NEW_NAME | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def is_removed_self(self): | ||||||
|  |         return self.action == FILE_ACTION_REMOVED_SELF | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return ( | ||||||
|  |             f"<{type(self).__name__}: action={self.action}, src_path={self.src_path!r}>" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def read_events(handle, path, recursive): | ||||||
|  |     buf, nbytes = read_directory_changes(handle, path, recursive) | ||||||
|  |     events = _parse_event_buffer(buf, nbytes) | ||||||
|  |     return [WinAPINativeEvent(action, src_path) for action, src_path in events] | ||||||
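For reference, read_events() above is the lowest-level entry point of this module; the sketch below (not part of the commit) mirrors what WindowsApiEmitter does with it, minus the translation into watchdog event classes. Windows only, and the watched path is an example:

    from libs.watchdog.observers.winapi import (
        close_directory_handle,
        get_directory_handle,
        read_events,
    )

    path = "C:\\watched"  # example path
    handle = get_directory_handle(path)
    try:
        # Blocks until ReadDirectoryChangesW reports at least one change.
        for event in read_events(handle, path, recursive=True):
            print(event.action, event.src_path)
    finally:
        close_directory_handle(handle)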
							
								
								
									
0 src/libs/watchdog/py.typed Normal file
							
								
								
									
334 src/libs/watchdog/tricks/__init__.py Normal file
							| @@ -0,0 +1,334 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.tricks | ||||||
|  | :synopsis: Utility event handlers. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ------- | ||||||
|  | .. autoclass:: Trick | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: LoggerTrick | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: ShellCommandTrick | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: AutoRestartTrick | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import functools | ||||||
|  | import logging | ||||||
|  | import os | ||||||
|  | import signal | ||||||
|  | import subprocess | ||||||
|  | import sys | ||||||
|  | import threading | ||||||
|  | import time | ||||||
|  |  | ||||||
|  | from libs.watchdog.events import EVENT_TYPE_OPENED, PatternMatchingEventHandler | ||||||
|  | from libs.watchdog.utils import echo | ||||||
|  | from libs.watchdog.utils.event_debouncer import EventDebouncer | ||||||
|  | from libs.watchdog.utils.process_watcher import ProcessWatcher | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  | echo_events = functools.partial(echo.echo, write=lambda msg: logger.info(msg)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Trick(PatternMatchingEventHandler): | ||||||
|  |  | ||||||
|  |     """Your tricks should subclass this class.""" | ||||||
|  |  | ||||||
|  |     @classmethod | ||||||
|  |     def generate_yaml(cls): | ||||||
|  |         return f"""- {cls.__module__}.{cls.__name__}: | ||||||
|  |   args: | ||||||
|  |   - argument1 | ||||||
|  |   - argument2 | ||||||
|  |   kwargs: | ||||||
|  |     patterns: | ||||||
|  |     - "*.py" | ||||||
|  |     - "*.js" | ||||||
|  |     ignore_patterns: | ||||||
|  |     - "version.py" | ||||||
|  |     ignore_directories: false | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class LoggerTrick(Trick): | ||||||
|  |  | ||||||
|  |     """A simple trick that only logs events.""" | ||||||
|  |  | ||||||
|  |     def on_any_event(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_modified(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_deleted(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_created(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_moved(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_closed(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_opened(self, event): | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ShellCommandTrick(Trick): | ||||||
|  |  | ||||||
|  |     """Executes shell commands in response to matched events.""" | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         shell_command=None, | ||||||
|  |         patterns=None, | ||||||
|  |         ignore_patterns=None, | ||||||
|  |         ignore_directories=False, | ||||||
|  |         wait_for_process=False, | ||||||
|  |         drop_during_process=False, | ||||||
|  |     ): | ||||||
|  |         super().__init__( | ||||||
|  |             patterns=patterns, | ||||||
|  |             ignore_patterns=ignore_patterns, | ||||||
|  |             ignore_directories=ignore_directories, | ||||||
|  |         ) | ||||||
|  |         self.shell_command = shell_command | ||||||
|  |         self.wait_for_process = wait_for_process | ||||||
|  |         self.drop_during_process = drop_during_process | ||||||
|  |  | ||||||
|  |         self.process = None | ||||||
|  |         self._process_watchers = set() | ||||||
|  |  | ||||||
|  |     def on_any_event(self, event): | ||||||
|  |         if event.event_type == EVENT_TYPE_OPENED: | ||||||
|  |             # FIXME: see issue #949, and find a way to better handle that scenario | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         from string import Template | ||||||
|  |  | ||||||
|  |         if self.drop_during_process and self.is_process_running(): | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         object_type = "directory" if event.is_directory else "file" | ||||||
|  |         context = { | ||||||
|  |             "watch_src_path": event.src_path, | ||||||
|  |             "watch_dest_path": "", | ||||||
|  |             "watch_event_type": event.event_type, | ||||||
|  |             "watch_object": object_type, | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         if self.shell_command is None: | ||||||
|  |             if hasattr(event, "dest_path"): | ||||||
|  |                 context["watch_dest_path"] = event.dest_path | ||||||
|  |                 command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"' | ||||||
|  |             else: | ||||||
|  |                 command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"' | ||||||
|  |         else: | ||||||
|  |             if hasattr(event, "dest_path"): | ||||||
|  |                 context["watch_dest_path"] = event.dest_path | ||||||
|  |             command = self.shell_command | ||||||
|  |  | ||||||
|  |         command = Template(command).safe_substitute(**context) | ||||||
|  |         self.process = subprocess.Popen(command, shell=True) | ||||||
|  |         if self.wait_for_process: | ||||||
|  |             self.process.wait() | ||||||
|  |         else: | ||||||
|  |             process_watcher = ProcessWatcher(self.process, None) | ||||||
|  |             self._process_watchers.add(process_watcher) | ||||||
|  |             process_watcher.process_termination_callback = functools.partial( | ||||||
|  |                 self._process_watchers.discard, process_watcher | ||||||
|  |             ) | ||||||
|  |             process_watcher.start() | ||||||
|  |  | ||||||
|  |     def is_process_running(self): | ||||||
|  |         return self._process_watchers or ( | ||||||
|  |             self.process is not None and self.process.poll() is None | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class AutoRestartTrick(Trick): | ||||||
|  |  | ||||||
|  |     """Starts a long-running subprocess and restarts it on matched events. | ||||||
|  |  | ||||||
|  |     The command parameter is a list of command arguments, such as | ||||||
|  |     `['bin/myserver', '-c', 'etc/myconfig.ini']`. | ||||||
|  |  | ||||||
|  |     Call `start()` after creating the Trick. Call `stop()` when stopping | ||||||
|  |     the process. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         command, | ||||||
|  |         patterns=None, | ||||||
|  |         ignore_patterns=None, | ||||||
|  |         ignore_directories=False, | ||||||
|  |         stop_signal=signal.SIGINT, | ||||||
|  |         kill_after=10, | ||||||
|  |         debounce_interval_seconds=0, | ||||||
|  |         restart_on_command_exit=True, | ||||||
|  |     ): | ||||||
|  |         if kill_after < 0: | ||||||
|  |             raise ValueError("kill_after must be non-negative.") | ||||||
|  |         if debounce_interval_seconds < 0: | ||||||
|  |             raise ValueError("debounce_interval_seconds must be non-negative.") | ||||||
|  |  | ||||||
|  |         super().__init__( | ||||||
|  |             patterns=patterns, | ||||||
|  |             ignore_patterns=ignore_patterns, | ||||||
|  |             ignore_directories=ignore_directories, | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |         self.command = command | ||||||
|  |         self.stop_signal = stop_signal | ||||||
|  |         self.kill_after = kill_after | ||||||
|  |         self.debounce_interval_seconds = debounce_interval_seconds | ||||||
|  |         self.restart_on_command_exit = restart_on_command_exit | ||||||
|  |  | ||||||
|  |         self.process = None | ||||||
|  |         self.process_watcher = None | ||||||
|  |         self.event_debouncer = None | ||||||
|  |         self.restart_count = 0 | ||||||
|  |  | ||||||
|  |         self._is_process_stopping = False | ||||||
|  |         self._is_trick_stopping = False | ||||||
|  |         self._stopping_lock = threading.RLock() | ||||||
|  |  | ||||||
|  |     def start(self): | ||||||
|  |         if self.debounce_interval_seconds: | ||||||
|  |             self.event_debouncer = EventDebouncer( | ||||||
|  |                 debounce_interval_seconds=self.debounce_interval_seconds, | ||||||
|  |                 events_callback=lambda events: self._restart_process(), | ||||||
|  |             ) | ||||||
|  |             self.event_debouncer.start() | ||||||
|  |         self._start_process() | ||||||
|  |  | ||||||
|  |     def stop(self): | ||||||
|  |         # Ensure the body of the function is only run once. | ||||||
|  |         with self._stopping_lock: | ||||||
|  |             if self._is_trick_stopping: | ||||||
|  |                 return | ||||||
|  |             self._is_trick_stopping = True | ||||||
|  |  | ||||||
|  |         process_watcher = self.process_watcher | ||||||
|  |         if self.event_debouncer is not None: | ||||||
|  |             self.event_debouncer.stop() | ||||||
|  |         self._stop_process() | ||||||
|  |  | ||||||
|  |         # Don't leak threads: Wait for background threads to stop. | ||||||
|  |         if self.event_debouncer is not None: | ||||||
|  |             self.event_debouncer.join() | ||||||
|  |         if process_watcher is not None: | ||||||
|  |             process_watcher.join() | ||||||
|  |  | ||||||
|  |     def _start_process(self): | ||||||
|  |         if self._is_trick_stopping: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         # Windows doesn't have setsid. | ||||||
|  |         self.process = subprocess.Popen( | ||||||
|  |             self.command, preexec_fn=getattr(os, "setsid", None) | ||||||
|  |         ) | ||||||
|  |         if self.restart_on_command_exit: | ||||||
|  |             self.process_watcher = ProcessWatcher(self.process, self._restart_process) | ||||||
|  |             self.process_watcher.start() | ||||||
|  |  | ||||||
|  |     def _stop_process(self): | ||||||
|  |         # Ensure the body of the function is not run in parallel in different threads. | ||||||
|  |         with self._stopping_lock: | ||||||
|  |             if self._is_process_stopping: | ||||||
|  |                 return | ||||||
|  |             self._is_process_stopping = True | ||||||
|  |  | ||||||
|  |         try: | ||||||
|  |             if self.process_watcher is not None: | ||||||
|  |                 self.process_watcher.stop() | ||||||
|  |                 self.process_watcher = None | ||||||
|  |  | ||||||
|  |             if self.process is not None: | ||||||
|  |                 try: | ||||||
|  |                     kill_process(self.process.pid, self.stop_signal) | ||||||
|  |                 except OSError: | ||||||
|  |                     # Process is already gone | ||||||
|  |                     pass | ||||||
|  |                 else: | ||||||
|  |                     kill_time = time.time() + self.kill_after | ||||||
|  |                     while time.time() < kill_time: | ||||||
|  |                         if self.process.poll() is not None: | ||||||
|  |                             break | ||||||
|  |                         time.sleep(0.25) | ||||||
|  |                     else: | ||||||
|  |                         try: | ||||||
|  |                             kill_process(self.process.pid, 9) | ||||||
|  |                         except OSError: | ||||||
|  |                             # Process is already gone | ||||||
|  |                             pass | ||||||
|  |                 self.process = None | ||||||
|  |         finally: | ||||||
|  |             self._is_process_stopping = False | ||||||
|  |  | ||||||
|  |     @echo_events | ||||||
|  |     def on_any_event(self, event): | ||||||
|  |         if event.event_type == EVENT_TYPE_OPENED: | ||||||
|  |             # FIXME: see issue #949, and find a way to better handle that scenario | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         if self.event_debouncer is not None: | ||||||
|  |             self.event_debouncer.handle_event(event) | ||||||
|  |         else: | ||||||
|  |             self._restart_process() | ||||||
|  |  | ||||||
|  |     def _restart_process(self): | ||||||
|  |         if self._is_trick_stopping: | ||||||
|  |             return | ||||||
|  |         self._stop_process() | ||||||
|  |         self._start_process() | ||||||
|  |         self.restart_count += 1 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if not sys.platform.startswith("win"): | ||||||
|  |  | ||||||
|  |     def kill_process(pid, stop_signal): | ||||||
|  |         os.killpg(os.getpgid(pid), stop_signal) | ||||||
|  |  | ||||||
|  | else: | ||||||
|  |  | ||||||
|  |     def kill_process(pid, stop_signal): | ||||||
|  |         os.kill(pid, stop_signal) | ||||||
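Following the AutoRestartTrick docstring above (create it with a command list, call start() after creating it and stop() when shutting down), here is a minimal sketch, not part of the commit, of wiring a trick to an observer. The command, patterns, and watched path are illustrative only:

    from libs.watchdog.observers.polling import PollingObserver
    from libs.watchdog.tricks import AutoRestartTrick

    trick = AutoRestartTrick(["python", "server.py"], patterns=["*.py"])
    trick.start()

    observer = PollingObserver()
    observer.schedule(trick, ".", recursive=True)  # a Trick is a PatternMatchingEventHandler
    observer.start()
    # ...on shutdown:
    # observer.stop(); observer.join(); trick.stop()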
							
								
								
									
150 src/libs/watchdog/utils/__init__.py Normal file
							| @@ -0,0 +1,150 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.utils | ||||||
|  | :synopsis: Utility classes and functions. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ------- | ||||||
|  | .. autoclass:: BaseThread | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |    :inherited-members: | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import threading | ||||||
|  | from typing import TYPE_CHECKING | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class UnsupportedLibc(Exception): | ||||||
|  |     pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class WatchdogShutdown(Exception): | ||||||
|  |     """ | ||||||
|  |     Semantic exception used to signal an external shutdown event. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class BaseThread(threading.Thread): | ||||||
|  |     """Convenience class for creating stoppable threads.""" | ||||||
|  |  | ||||||
|  |     def __init__(self): | ||||||
|  |         threading.Thread.__init__(self) | ||||||
|  |         if hasattr(self, "daemon"): | ||||||
|  |             self.daemon = True | ||||||
|  |         else: | ||||||
|  |             self.setDaemon(True) | ||||||
|  |         self._stopped_event = threading.Event() | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def stopped_event(self): | ||||||
|  |         return self._stopped_event | ||||||
|  |  | ||||||
|  |     def should_keep_running(self): | ||||||
|  |         """Determines whether the thread should continue running.""" | ||||||
|  |         return not self._stopped_event.is_set() | ||||||
|  |  | ||||||
|  |     def on_thread_stop(self): | ||||||
|  |         """Override this method instead of :meth:`stop()`. | ||||||
|  |         :meth:`stop()` calls this method. | ||||||
|  |  | ||||||
|  |         This method is called immediately after the thread is signaled to stop. | ||||||
|  |         """ | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def stop(self): | ||||||
|  |         """Signals the thread to stop.""" | ||||||
|  |         self._stopped_event.set() | ||||||
|  |         self.on_thread_stop() | ||||||
|  |  | ||||||
|  |     def on_thread_start(self): | ||||||
|  |         """Override this method instead of :meth:`start()`. :meth:`start()` | ||||||
|  |         calls this method. | ||||||
|  |  | ||||||
|  |         This method is called right before this thread is started and this | ||||||
|  |         object’s run() method is invoked. | ||||||
|  |         """ | ||||||
|  |         pass | ||||||
|  |  | ||||||
|  |     def start(self): | ||||||
|  |         self.on_thread_start() | ||||||
|  |         threading.Thread.start(self) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_module(module_name): | ||||||
|  |     """Imports a module given its name and returns a handle to it.""" | ||||||
|  |     try: | ||||||
|  |         __import__(module_name) | ||||||
|  |     except ImportError: | ||||||
|  |         raise ImportError(f"No module named {module_name}") | ||||||
|  |     return sys.modules[module_name] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_class(dotted_path): | ||||||
|  |     """Loads and returns a class definition provided a dotted path | ||||||
|  |     specification. The last part of the dotted path is the class name, | ||||||
|  |     and there must be at least one module name preceding the class name. | ||||||
|  |  | ||||||
|  |     Notes: | ||||||
|  |     You will need to ensure that the module you are trying to load | ||||||
|  |     exists in the Python path. | ||||||
|  |  | ||||||
|  |     Examples: | ||||||
|  |     - module.name.ClassName    # Provided module.name is in the Python path. | ||||||
|  |     - module.ClassName         # Provided module is in the Python path. | ||||||
|  |  | ||||||
|  |     What won't work: | ||||||
|  |     - ClassName | ||||||
|  |     - modle.name.ClassName     # Typo in module name. | ||||||
|  |     - module.name.ClasNam      # Typo in classname. | ||||||
|  |     """ | ||||||
|  |     dotted_path_split = dotted_path.split(".") | ||||||
|  |     if len(dotted_path_split) <= 1: | ||||||
|  |         raise ValueError( | ||||||
|  |             f"Dotted module path {dotted_path} must contain a module name and a classname" | ||||||
|  |         ) | ||||||
|  |     klass_name = dotted_path_split[-1] | ||||||
|  |     module_name = ".".join(dotted_path_split[:-1]) | ||||||
|  |  | ||||||
|  |     module = load_module(module_name) | ||||||
|  |     if hasattr(module, klass_name): | ||||||
|  |         return getattr(module, klass_name) | ||||||
|  |         # Finally create and return an instance of the class | ||||||
|  |         # return klass(*args, **kwargs) | ||||||
|  |     else: | ||||||
|  |         raise AttributeError( | ||||||
|  |             f"Module {module_name} does not have class attribute {klass_name}" | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if TYPE_CHECKING or sys.version_info >= (3, 8): | ||||||
|  |     # using `as` to explicitly re-export this since this is a compatibility layer | ||||||
|  |     from typing import Protocol as Protocol | ||||||
|  | else: | ||||||
|  |     # Provide a dummy Protocol class when not available from stdlib.  Should be used | ||||||
|  |     # only for hinting.  This could be had from typing_protocol, but not worth adding | ||||||
|  |     # the _first_ dependency just for this. | ||||||
|  |     class Protocol: | ||||||
|  |         ... | ||||||
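BaseThread above is the stoppable-thread primitive the other watchdog threads added in this commit (EventDebouncer and ProcessWatcher below) build on. A minimal usage sketch, assuming the package is importable under the libs.watchdog namespace given in the module docstring; the PollerThread name and poll interval are illustrative only:

    import time

    from libs.watchdog.utils import BaseThread

    class PollerThread(BaseThread):
        # Illustrative subclass: run() loops until stop() sets the stopped event.
        def run(self):
            while self.should_keep_running():
                time.sleep(0.1)

        def on_thread_stop(self):
            # Called by stop() right after the stopped event is set.
            print("poller asked to stop")

    poller = PollerThread()
    poller.start()   # runs on_thread_start(), then launches run()
    poller.stop()    # sets the stopped event and calls on_thread_stop()
    poller.join()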
							
								
								
									
102  src/libs/watchdog/utils/bricks.py  Normal file
							| @@ -0,0 +1,102 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | Utility collections or "bricks". | ||||||
|  |  | ||||||
|  | :module: libs.watchdog.utils.bricks | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: lalinsky@gmail.com (Lukáš Lalinský) | ||||||
|  | :author: python@rcn.com (Raymond Hettinger) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ======= | ||||||
|  | .. autoclass:: SkipRepeatsQueue | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |    :inherited-members: | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import queue | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class SkipRepeatsQueue(queue.Queue): | ||||||
|  |  | ||||||
|  |     """Thread-safe implementation of a special queue where a put that | ||||||
|  |     repeats the most recently put item is dropped. | ||||||
|  |  | ||||||
|  |     The implementation leverages locking already implemented in the base class | ||||||
|  |     redefining only the primitives. | ||||||
|  |  | ||||||
|  |     Queued items must be immutable and hashable so that they can be used | ||||||
|  |     as dictionary keys. You must implement **only read-only properties** and | ||||||
|  |     the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and | ||||||
|  |     :meth:`Item.__ne__()` methods for items to be hashable. | ||||||
|  |  | ||||||
|  |     An example implementation follows:: | ||||||
|  |  | ||||||
|  |         class Item: | ||||||
|  |             def __init__(self, a, b): | ||||||
|  |                 self._a = a | ||||||
|  |                 self._b = b | ||||||
|  |  | ||||||
|  |             @property | ||||||
|  |             def a(self): | ||||||
|  |                 return self._a | ||||||
|  |  | ||||||
|  |             @property | ||||||
|  |             def b(self): | ||||||
|  |                 return self._b | ||||||
|  |  | ||||||
|  |             def _key(self): | ||||||
|  |                 return (self._a, self._b) | ||||||
|  |  | ||||||
|  |             def __eq__(self, item): | ||||||
|  |                 return self._key() == item._key() | ||||||
|  |  | ||||||
|  |             def __ne__(self, item): | ||||||
|  |                 return self._key() != item._key() | ||||||
|  |  | ||||||
|  |             def __hash__(self): | ||||||
|  |                 return hash(self._key()) | ||||||
|  |  | ||||||
|  |     Based on the original OrderedSetQueue implementation. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def _init(self, maxsize): | ||||||
|  |         super()._init(maxsize) | ||||||
|  |         self._last_item = None | ||||||
|  |  | ||||||
|  |     def _put(self, item): | ||||||
|  |         if self._last_item is None or item != self._last_item: | ||||||
|  |             super()._put(item) | ||||||
|  |             self._last_item = item | ||||||
|  |         else: | ||||||
|  |             # `put` increments `unfinished_tasks` even if we did not put | ||||||
|  |             # anything into the queue here | ||||||
|  |             self.unfinished_tasks -= 1 | ||||||
|  |  | ||||||
|  |     def _get(self): | ||||||
|  |         item = super()._get() | ||||||
|  |         if item is self._last_item: | ||||||
|  |             self._last_item = None | ||||||
|  |         return item | ||||||
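SkipRepeatsQueue only suppresses a put that repeats the item put immediately before it; non-consecutive repeats still get queued. A small sketch, assuming the libs.watchdog.utils.bricks module path from the docstring (the event strings are made up):

    from libs.watchdog.utils.bricks import SkipRepeatsQueue

    q = SkipRepeatsQueue()
    q.put("modified /tmp/a")
    q.put("modified /tmp/a")   # identical to the previous put: dropped
    q.put("created /tmp/b")
    q.put("modified /tmp/a")   # kept, because it is not a consecutive repeat

    print(q.qsize())                      # 3
    print([q.get() for _ in range(3)])
    # ['modified /tmp/a', 'created /tmp/b', 'modified /tmp/a']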
							
								
								
									
85  src/libs/watchdog/utils/delayed_queue.py  Normal file
							| @@ -0,0 +1,85 @@ | |||||||
|  | # Copyright 2014 Thomas Amland <thomas.amland@gmail.com> | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import threading | ||||||
|  | import time | ||||||
|  | from collections import deque | ||||||
|  | from typing import Callable, Deque, Generic, Optional, Tuple, TypeVar | ||||||
|  |  | ||||||
|  | T = TypeVar("T") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DelayedQueue(Generic[T]): | ||||||
|  |     def __init__(self, delay): | ||||||
|  |         self.delay_sec = delay | ||||||
|  |         self._lock = threading.Lock() | ||||||
|  |         self._not_empty = threading.Condition(self._lock) | ||||||
|  |         self._queue: Deque[Tuple[T, float, bool]] = deque() | ||||||
|  |         self._closed = False | ||||||
|  |  | ||||||
|  |     def put(self, element: T, delay: bool = False) -> None: | ||||||
|  |         """Add element to queue.""" | ||||||
|  |         self._lock.acquire() | ||||||
|  |         self._queue.append((element, time.time(), delay)) | ||||||
|  |         self._not_empty.notify() | ||||||
|  |         self._lock.release() | ||||||
|  |  | ||||||
|  |     def close(self): | ||||||
|  |         """Close queue, indicating no more items will be added.""" | ||||||
|  |         self._closed = True | ||||||
|  |         # Interrupt the blocking _not_empty.wait() call in get | ||||||
|  |         self._not_empty.acquire() | ||||||
|  |         self._not_empty.notify() | ||||||
|  |         self._not_empty.release() | ||||||
|  |  | ||||||
|  |     def get(self) -> Optional[T]: | ||||||
|  |         """Remove and return an element from the queue, or return None | ||||||
|  |         if this queue has been closed. | ||||||
|  |         """ | ||||||
|  |         while True: | ||||||
|  |             # wait for element to be added to queue | ||||||
|  |             self._not_empty.acquire() | ||||||
|  |             while len(self._queue) == 0 and not self._closed: | ||||||
|  |                 self._not_empty.wait() | ||||||
|  |  | ||||||
|  |             if self._closed: | ||||||
|  |                 self._not_empty.release() | ||||||
|  |                 return None | ||||||
|  |             head, insert_time, delay = self._queue[0] | ||||||
|  |             self._not_empty.release() | ||||||
|  |  | ||||||
|  |             # wait for delay if required | ||||||
|  |             if delay: | ||||||
|  |                 time_left = insert_time + self.delay_sec - time.time() | ||||||
|  |                 while time_left > 0: | ||||||
|  |                     time.sleep(time_left) | ||||||
|  |                     time_left = insert_time + self.delay_sec - time.time() | ||||||
|  |  | ||||||
|  |             # return element if it's still in the queue | ||||||
|  |             with self._lock: | ||||||
|  |                 if len(self._queue) > 0 and self._queue[0][0] is head: | ||||||
|  |                     self._queue.popleft() | ||||||
|  |                     return head | ||||||
|  |  | ||||||
|  |     def remove(self, predicate: Callable[[T], bool]) -> Optional[T]: | ||||||
|  |         """Remove and return the first item for which predicate is True, | ||||||
|  |         ignoring delay.""" | ||||||
|  |         with self._lock: | ||||||
|  |             for i, (elem, t, delay) in enumerate(self._queue): | ||||||
|  |                 if predicate(elem): | ||||||
|  |                     del self._queue[i] | ||||||
|  |                     return elem | ||||||
|  |         return None | ||||||
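DelayedQueue hands a delayed item to get() only after delay_sec has elapsed, and get() returns None once close() has been called. A rough producer/consumer sketch, with made-up item strings and timings, assuming the libs.watchdog.utils.delayed_queue module path:

    import threading
    import time

    from libs.watchdog.utils.delayed_queue import DelayedQueue

    dq = DelayedQueue(0.5)              # hold delayed items back for 0.5 s

    def consume():
        while True:
            item = dq.get()
            if item is None:            # get() returns None after close()
                break
            print("got", item)

    worker = threading.Thread(target=consume, daemon=True)
    worker.start()

    dq.put("immediate")                 # handed out right away
    dq.put("held back", delay=True)     # handed out roughly 0.5 s after the put

    time.sleep(1.0)                     # let both items drain before closing
    dq.close()
    worker.join(timeout=2)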
							
								
								
									
375  src/libs/watchdog/utils/dirsnapshot.py  Normal file
							| @@ -0,0 +1,375 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # Copyright 2014 Thomas Amland <thomas.amland@gmail.com> | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.utils.dirsnapshot | ||||||
|  | :synopsis: Directory snapshots and comparison. | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  |  | ||||||
|  | .. ADMONITION:: Where are the moved events? They "disappeared" | ||||||
|  |  | ||||||
|  |         This implementation does not take partition boundaries | ||||||
|  |         into consideration. It will only work when the directory | ||||||
|  |         tree is entirely on the same file system. More specifically, | ||||||
|  |         any part of the code that depends on inode numbers can | ||||||
|  |         break if partition boundaries are crossed. In these cases, | ||||||
|  |         the snapshot diff will represent file/directory movement as | ||||||
|  |         created and deleted events. | ||||||
|  |  | ||||||
|  | Classes | ||||||
|  | ------- | ||||||
|  | .. autoclass:: DirectorySnapshot | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: DirectorySnapshotDiff | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | .. autoclass:: EmptyDirectorySnapshot | ||||||
|  |    :members: | ||||||
|  |    :show-inheritance: | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import errno | ||||||
|  | import os | ||||||
|  | from stat import S_ISDIR | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirectorySnapshotDiff: | ||||||
|  |     """ | ||||||
|  |     Compares two directory snapshots and creates an object that represents | ||||||
|  |     the difference between the two snapshots. | ||||||
|  |  | ||||||
|  |     :param ref: | ||||||
|  |         The reference directory snapshot. | ||||||
|  |     :type ref: | ||||||
|  |         :class:`DirectorySnapshot` | ||||||
|  |     :param snapshot: | ||||||
|  |         The directory snapshot which will be compared | ||||||
|  |         with the reference snapshot. | ||||||
|  |     :type snapshot: | ||||||
|  |         :class:`DirectorySnapshot` | ||||||
|  |     :param ignore_device: | ||||||
|  |         A boolean indicating whether to ignore the device id or not. | ||||||
|  |         By default, a file may be uniquely identified by a combination of its first | ||||||
|  |         inode and its device id. The problem is that the device id may (or may not) | ||||||
|  |         change between system boots. This problem would cause the DirectorySnapshotDiff | ||||||
|  |         to think a file has been deleted and created again but it would be the | ||||||
|  |         exact same file. | ||||||
|  |         Set to True only if you are sure you will always use the same device. | ||||||
|  |     :type ignore_device: | ||||||
|  |         :class:`bool` | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, ref, snapshot, ignore_device=False): | ||||||
|  |         created = snapshot.paths - ref.paths | ||||||
|  |         deleted = ref.paths - snapshot.paths | ||||||
|  |  | ||||||
|  |         if ignore_device: | ||||||
|  |  | ||||||
|  |             def get_inode(directory, full_path): | ||||||
|  |                 return directory.inode(full_path)[0] | ||||||
|  |  | ||||||
|  |         else: | ||||||
|  |  | ||||||
|  |             def get_inode(directory, full_path): | ||||||
|  |                 return directory.inode(full_path) | ||||||
|  |  | ||||||
|  |         # check that all unchanged paths have the same inode | ||||||
|  |         for path in ref.paths & snapshot.paths: | ||||||
|  |             if get_inode(ref, path) != get_inode(snapshot, path): | ||||||
|  |                 created.add(path) | ||||||
|  |                 deleted.add(path) | ||||||
|  |  | ||||||
|  |         # find moved paths | ||||||
|  |         moved = set() | ||||||
|  |         for path in set(deleted): | ||||||
|  |             inode = ref.inode(path) | ||||||
|  |             new_path = snapshot.path(inode) | ||||||
|  |             if new_path: | ||||||
|  |                 # file is not deleted but moved | ||||||
|  |                 deleted.remove(path) | ||||||
|  |                 moved.add((path, new_path)) | ||||||
|  |  | ||||||
|  |         for path in set(created): | ||||||
|  |             inode = snapshot.inode(path) | ||||||
|  |             old_path = ref.path(inode) | ||||||
|  |             if old_path: | ||||||
|  |                 created.remove(path) | ||||||
|  |                 moved.add((old_path, path)) | ||||||
|  |  | ||||||
|  |         # find modified paths | ||||||
|  |         # first check paths that have not moved | ||||||
|  |         modified = set() | ||||||
|  |         for path in ref.paths & snapshot.paths: | ||||||
|  |             if get_inode(ref, path) == get_inode(snapshot, path): | ||||||
|  |                 if ref.mtime(path) != snapshot.mtime(path) or ref.size( | ||||||
|  |                     path | ||||||
|  |                 ) != snapshot.size(path): | ||||||
|  |                     modified.add(path) | ||||||
|  |  | ||||||
|  |         for old_path, new_path in moved: | ||||||
|  |             if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size( | ||||||
|  |                 old_path | ||||||
|  |             ) != snapshot.size(new_path): | ||||||
|  |                 modified.add(old_path) | ||||||
|  |  | ||||||
|  |         self._dirs_created = [path for path in created if snapshot.isdir(path)] | ||||||
|  |         self._dirs_deleted = [path for path in deleted if ref.isdir(path)] | ||||||
|  |         self._dirs_modified = [path for path in modified if ref.isdir(path)] | ||||||
|  |         self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)] | ||||||
|  |  | ||||||
|  |         self._files_created = list(created - set(self._dirs_created)) | ||||||
|  |         self._files_deleted = list(deleted - set(self._dirs_deleted)) | ||||||
|  |         self._files_modified = list(modified - set(self._dirs_modified)) | ||||||
|  |         self._files_moved = list(moved - set(self._dirs_moved)) | ||||||
|  |  | ||||||
|  |     def __str__(self): | ||||||
|  |         return self.__repr__() | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         fmt = ( | ||||||
|  |             "<{0} files(created={1}, deleted={2}, modified={3}, moved={4})," | ||||||
|  |             " folders(created={5}, deleted={6}, modified={7}, moved={8})>" | ||||||
|  |         ) | ||||||
|  |         return fmt.format( | ||||||
|  |             type(self).__name__, | ||||||
|  |             len(self._files_created), | ||||||
|  |             len(self._files_deleted), | ||||||
|  |             len(self._files_modified), | ||||||
|  |             len(self._files_moved), | ||||||
|  |             len(self._dirs_created), | ||||||
|  |             len(self._dirs_deleted), | ||||||
|  |             len(self._dirs_modified), | ||||||
|  |             len(self._dirs_moved), | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def files_created(self): | ||||||
|  |         """List of files that were created.""" | ||||||
|  |         return self._files_created | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def files_deleted(self): | ||||||
|  |         """List of files that were deleted.""" | ||||||
|  |         return self._files_deleted | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def files_modified(self): | ||||||
|  |         """List of files that were modified.""" | ||||||
|  |         return self._files_modified | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def files_moved(self): | ||||||
|  |         """ | ||||||
|  |         List of files that were moved. | ||||||
|  |  | ||||||
|  |         Each event is a two-tuple; the first item is the original path, | ||||||
|  |         which has been renamed to the second item in the tuple. | ||||||
|  |         """ | ||||||
|  |         return self._files_moved | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def dirs_modified(self): | ||||||
|  |         """ | ||||||
|  |         List of directories that were modified. | ||||||
|  |         """ | ||||||
|  |         return self._dirs_modified | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def dirs_moved(self): | ||||||
|  |         """ | ||||||
|  |         List of directories that were moved. | ||||||
|  |  | ||||||
|  |         Each event is a two-tuple; the first item is the original path, | ||||||
|  |         which has been renamed to the second item in the tuple. | ||||||
|  |         """ | ||||||
|  |         return self._dirs_moved | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def dirs_deleted(self): | ||||||
|  |         """ | ||||||
|  |         List of directories that were deleted. | ||||||
|  |         """ | ||||||
|  |         return self._dirs_deleted | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def dirs_created(self): | ||||||
|  |         """ | ||||||
|  |         List of directories that were created. | ||||||
|  |         """ | ||||||
|  |         return self._dirs_created | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DirectorySnapshot: | ||||||
|  |     """ | ||||||
|  |     A snapshot of stat information of files in a directory. | ||||||
|  |  | ||||||
|  |     :param path: | ||||||
|  |         The directory path for which a snapshot should be taken. | ||||||
|  |     :type path: | ||||||
|  |         ``str`` | ||||||
|  |     :param recursive: | ||||||
|  |         ``True`` if the entire directory tree should be included in the | ||||||
|  |         snapshot; ``False`` otherwise. | ||||||
|  |     :type recursive: | ||||||
|  |         ``bool`` | ||||||
|  |     :param stat: | ||||||
|  |         Use custom stat function that returns a stat structure for path. | ||||||
|  |         Currently only st_dev, st_ino, st_mode and st_mtime are needed. | ||||||
|  |  | ||||||
|  |         A function taking a ``path`` as argument which will be called | ||||||
|  |         for every entry in the directory tree. | ||||||
|  |     :param listdir: | ||||||
|  |         Use custom listdir function. For details see ``os.scandir``. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, path, recursive=True, stat=os.stat, listdir=os.scandir): | ||||||
|  |         self.recursive = recursive | ||||||
|  |         self.stat = stat | ||||||
|  |         self.listdir = listdir | ||||||
|  |  | ||||||
|  |         self._stat_info = {} | ||||||
|  |         self._inode_to_path = {} | ||||||
|  |  | ||||||
|  |         st = self.stat(path) | ||||||
|  |         self._stat_info[path] = st | ||||||
|  |         self._inode_to_path[(st.st_ino, st.st_dev)] = path | ||||||
|  |  | ||||||
|  |         for p, st in self.walk(path): | ||||||
|  |             i = (st.st_ino, st.st_dev) | ||||||
|  |             self._inode_to_path[i] = p | ||||||
|  |             self._stat_info[p] = st | ||||||
|  |  | ||||||
|  |     def walk(self, root): | ||||||
|  |         try: | ||||||
|  |             paths = [os.path.join(root, entry.name) for entry in self.listdir(root)] | ||||||
|  |         except OSError as e: | ||||||
|  |             # Directory may have been deleted between finding it in the directory | ||||||
|  |             # list of its parent and trying to list its contents. If this | ||||||
|  |             # happens we treat it as empty. Likewise if the directory was replaced | ||||||
|  |             # with a file of the same name (less likely, but possible). | ||||||
|  |             if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): | ||||||
|  |                 return | ||||||
|  |             else: | ||||||
|  |                 raise | ||||||
|  |  | ||||||
|  |         entries = [] | ||||||
|  |         for p in paths: | ||||||
|  |             try: | ||||||
|  |                 entry = (p, self.stat(p)) | ||||||
|  |                 entries.append(entry) | ||||||
|  |                 yield entry | ||||||
|  |             except OSError: | ||||||
|  |                 continue | ||||||
|  |  | ||||||
|  |         if self.recursive: | ||||||
|  |             for path, st in entries: | ||||||
|  |                 try: | ||||||
|  |                     if S_ISDIR(st.st_mode): | ||||||
|  |                         for entry in self.walk(path): | ||||||
|  |                             yield entry | ||||||
|  |                 except PermissionError: | ||||||
|  |                     pass | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def paths(self): | ||||||
|  |         """ | ||||||
|  |         Set of file/directory paths in the snapshot. | ||||||
|  |         """ | ||||||
|  |         return set(self._stat_info.keys()) | ||||||
|  |  | ||||||
|  |     def path(self, id): | ||||||
|  |         """ | ||||||
|  |         Returns path for id. None if id is unknown to this snapshot. | ||||||
|  |         """ | ||||||
|  |         return self._inode_to_path.get(id) | ||||||
|  |  | ||||||
|  |     def inode(self, path): | ||||||
|  |         """Returns an id for path.""" | ||||||
|  |         st = self._stat_info[path] | ||||||
|  |         return (st.st_ino, st.st_dev) | ||||||
|  |  | ||||||
|  |     def isdir(self, path): | ||||||
|  |         return S_ISDIR(self._stat_info[path].st_mode) | ||||||
|  |  | ||||||
|  |     def mtime(self, path): | ||||||
|  |         return self._stat_info[path].st_mtime | ||||||
|  |  | ||||||
|  |     def size(self, path): | ||||||
|  |         return self._stat_info[path].st_size | ||||||
|  |  | ||||||
|  |     def stat_info(self, path): | ||||||
|  |         """ | ||||||
|  |         Returns a stat information object for the specified path from | ||||||
|  |         the snapshot. | ||||||
|  |  | ||||||
|  |         Attached information is subject to change. Do not use unless | ||||||
|  |         you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`, | ||||||
|  |         :func:`isdir` instead. | ||||||
|  |  | ||||||
|  |         :param path: | ||||||
|  |             The path for which stat information should be obtained | ||||||
|  |             from a snapshot. | ||||||
|  |         """ | ||||||
|  |         return self._stat_info[path] | ||||||
|  |  | ||||||
|  |     def __sub__(self, previous_dirsnap): | ||||||
|  |         """Allow subtracting a DirectorySnapshot object instance from | ||||||
|  |         another. | ||||||
|  |  | ||||||
|  |         :returns: | ||||||
|  |             A :class:`DirectorySnapshotDiff` object. | ||||||
|  |         """ | ||||||
|  |         return DirectorySnapshotDiff(previous_dirsnap, self) | ||||||
|  |  | ||||||
|  |     def __str__(self): | ||||||
|  |         return self.__repr__() | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return str(self._stat_info) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class EmptyDirectorySnapshot: | ||||||
|  |     """Class to implement an empty snapshot. This is used together with | ||||||
|  |     DirectorySnapshot and DirectorySnapshotDiff in order to get all the files/folders | ||||||
|  |     in the directory as created. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def path(_): | ||||||
|  |         """Mock up method to return the path of the received inode. As the snapshot | ||||||
|  |         is intended to be empty, it always returns None. | ||||||
|  |  | ||||||
|  |         :returns: | ||||||
|  |             None. | ||||||
|  |         """ | ||||||
|  |         return None | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def paths(self): | ||||||
|  |         """Mock up method to return a set of file/directory paths in the snapshot. As | ||||||
|  |         the snapshot is intended to be empty, it always returns an empty set. | ||||||
|  |  | ||||||
|  |         :returns: | ||||||
|  |             An empty set. | ||||||
|  |         """ | ||||||
|  |         return set() | ||||||
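The intended usage is to take a snapshot, let the filesystem change, take a second snapshot and subtract the two; the __sub__ overload above builds the DirectorySnapshotDiff. A sketch, with /tmp/watched standing in for whatever directory is being observed:

    from libs.watchdog.utils.dirsnapshot import DirectorySnapshot

    before = DirectorySnapshot("/tmp/watched")
    # ... files get created, modified, moved or deleted here ...
    after = DirectorySnapshot("/tmp/watched")

    diff = after - before               # same as DirectorySnapshotDiff(before, after)
    print(diff.files_created)
    print(diff.files_moved)             # list of (old_path, new_path) tuples
    print(diff.dirs_deleted)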
							
								
								
									
167  src/libs/watchdog/utils/echo.py  Normal file
							| @@ -0,0 +1,167 @@ | |||||||
|  | # echo.py: Tracing function calls using Python decorators. | ||||||
|  | # | ||||||
|  | # Written by Thomas Guest <tag@wordaligned.org> | ||||||
|  | # Please see http://wordaligned.org/articles/echo | ||||||
|  | # | ||||||
|  | # Place into the public domain. | ||||||
|  |  | ||||||
|  | """ Echo calls made to functions and methods in a module. | ||||||
|  |  | ||||||
|  | "Echoing" a function call means printing out the name of the function | ||||||
|  | and the values of its arguments before making the call (which is more | ||||||
|  | commonly referred to as "tracing", but Python already has a trace module). | ||||||
|  |  | ||||||
|  | Example: to echo calls made to functions in "my_module" do: | ||||||
|  |  | ||||||
|  |   import echo | ||||||
|  |   import my_module | ||||||
|  |   echo.echo_module(my_module) | ||||||
|  |  | ||||||
|  | Example: to echo calls made to functions in "my_module.my_class" do: | ||||||
|  |  | ||||||
|  |   echo.echo_class(my_module.my_class) | ||||||
|  |  | ||||||
|  | Alternatively, echo.echo can be used to decorate functions. Calls to the | ||||||
|  | decorated function will be echoed. | ||||||
|  |  | ||||||
|  | Example: | ||||||
|  |  | ||||||
|  |   @echo.echo | ||||||
|  |   def my_function(args): | ||||||
|  |       pass | ||||||
|  | """ | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import inspect | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def name(item): | ||||||
|  |     """Return an item's name.""" | ||||||
|  |     return item.__name__ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_classmethod(instancemethod, klass): | ||||||
|  |     """Determine if an instancemethod is a classmethod.""" | ||||||
|  |     return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_static_method(method, klass): | ||||||
|  |     """Returns True if method is a static method of klass.""" | ||||||
|  |     return next( | ||||||
|  |         ( | ||||||
|  |             isinstance(c.__dict__[name(method)], staticmethod) | ||||||
|  |             for c in klass.mro() | ||||||
|  |             if name(method) in c.__dict__ | ||||||
|  |         ), | ||||||
|  |         False, | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_class_private_name(name): | ||||||
|  |     """Determine if a name is a class private name.""" | ||||||
|  |     # Exclude system defined names such as __init__, __add__ etc | ||||||
|  |     return name.startswith("__") and not name.endswith("__") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def method_name(method): | ||||||
|  |     """Return a method's name. | ||||||
|  |  | ||||||
|  |     This function returns the name the method is accessed by from | ||||||
|  |     outside the class (i.e. it prefixes "private" methods appropriately). | ||||||
|  |     """ | ||||||
|  |     mname = name(method) | ||||||
|  |     if is_class_private_name(mname): | ||||||
|  |         mname = f"_{name(method.__self__.__class__)}{mname}" | ||||||
|  |     return mname | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def format_arg_value(arg_val): | ||||||
|  |     """Return a string representing a (name, value) pair. | ||||||
|  |  | ||||||
|  |     >>> format_arg_value(('x', (1, 2, 3))) | ||||||
|  |     'x=(1, 2, 3)' | ||||||
|  |     """ | ||||||
|  |     arg, val = arg_val | ||||||
|  |     return f"{arg}={val!r}" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def echo(fn, write=sys.stdout.write): | ||||||
|  |     """Echo calls to a function. | ||||||
|  |  | ||||||
|  |     Returns a decorated version of the input function which "echoes" calls | ||||||
|  |     made to it by writing out the function's name and the arguments it was | ||||||
|  |     called with. | ||||||
|  |     """ | ||||||
|  |     import functools | ||||||
|  |  | ||||||
|  |     # Unpack function's arg count, arg names, arg defaults | ||||||
|  |     code = fn.__code__ | ||||||
|  |     argcount = code.co_argcount | ||||||
|  |     argnames = code.co_varnames[:argcount] | ||||||
|  |     fn_defaults = fn.__defaults__ or [] | ||||||
|  |     argdefs = dict(list(zip(argnames[-len(fn_defaults) :], fn_defaults))) | ||||||
|  |  | ||||||
|  |     @functools.wraps(fn) | ||||||
|  |     def wrapped(*v, **k): | ||||||
|  |         # Collect function arguments by chaining together positional, | ||||||
|  |         # defaulted, extra positional and keyword arguments. | ||||||
|  |         positional = list(map(format_arg_value, list(zip(argnames, v)))) | ||||||
|  |         defaulted = [ | ||||||
|  |             format_arg_value((a, argdefs[a])) for a in argnames[len(v) :] if a not in k | ||||||
|  |         ] | ||||||
|  |         nameless = list(map(repr, v[argcount:])) | ||||||
|  |         keyword = list(map(format_arg_value, list(k.items()))) | ||||||
|  |         args = positional + defaulted + nameless + keyword | ||||||
|  |         write(f"{name(fn)}({', '.join(args)})\n") | ||||||
|  |         return fn(*v, **k) | ||||||
|  |  | ||||||
|  |     return wrapped | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def echo_instancemethod(klass, method, write=sys.stdout.write): | ||||||
|  |     """Change an instancemethod so that calls to it are echoed. | ||||||
|  |  | ||||||
|  |     Replacing a classmethod is a little more tricky. | ||||||
|  |     See: http://www.python.org/doc/current/ref/types.html | ||||||
|  |     """ | ||||||
|  |     mname = method_name(method) | ||||||
|  |     never_echo = ( | ||||||
|  |         "__str__", | ||||||
|  |         "__repr__", | ||||||
|  |     )  # Avoid recursion printing method calls | ||||||
|  |     if mname in never_echo: | ||||||
|  |         pass | ||||||
|  |     elif is_classmethod(method, klass): | ||||||
|  |         setattr(klass, mname, classmethod(echo(method.__func__, write))) | ||||||
|  |     else: | ||||||
|  |         setattr(klass, mname, echo(method, write)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def echo_class(klass, write=sys.stdout.write): | ||||||
|  |     """Echo calls to class methods and static functions""" | ||||||
|  |     for _, method in inspect.getmembers(klass, inspect.ismethod): | ||||||
|  |         # In python 3 only class methods are returned here | ||||||
|  |         echo_instancemethod(klass, method, write) | ||||||
|  |     for _, fn in inspect.getmembers(klass, inspect.isfunction): | ||||||
|  |         if is_static_method(fn, klass): | ||||||
|  |             setattr(klass, name(fn), staticmethod(echo(fn, write))) | ||||||
|  |         else: | ||||||
|  |             # It's not a class or a static method, so it must be an instance method. | ||||||
|  |             echo_instancemethod(klass, fn, write) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def echo_module(mod, write=sys.stdout.write): | ||||||
|  |     """Echo calls to functions and methods in a module.""" | ||||||
|  |     for fname, fn in inspect.getmembers(mod, inspect.isfunction): | ||||||
|  |         setattr(mod, fname, echo(fn, write)) | ||||||
|  |     for _, klass in inspect.getmembers(mod, inspect.isclass): | ||||||
|  |         echo_class(klass, write) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     import doctest | ||||||
|  |  | ||||||
|  |     optionflags = doctest.ELLIPSIS | ||||||
|  |     doctest.testfile("echoexample.txt", optionflags=optionflags) | ||||||
|  |     doctest.testmod(optionflags=optionflags) | ||||||
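In practice the decorator form is the most direct way to use this module: wrap a function and every call is written to stdout before it executes. A short sketch, assuming the module is importable as libs.watchdog.utils.echo (the add function is made up):

    from libs.watchdog.utils import echo

    @echo.echo
    def add(x, y=10):
        return x + y

    add(1)        # prints: add(x=1, y=10)
    add(2, y=3)   # prints: add(x=2, y=3)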
							
								
								
									
56  src/libs/watchdog/utils/event_debouncer.py  Normal file
							| @@ -0,0 +1,56 @@ | |||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import threading | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils import BaseThread | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class EventDebouncer(BaseThread): | ||||||
|  |     """Background thread for debouncing event handling. | ||||||
|  |  | ||||||
|  |     When an event is received, wait until the configured debounce interval | ||||||
|  |     passes before calling the callback.  If additional events are received | ||||||
|  |     before the interval passes, reset the timer and keep waiting.  When the | ||||||
|  |     debouncing interval passes, the callback will be called with a list of | ||||||
|  |     events in the order in which they were received. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, debounce_interval_seconds, events_callback): | ||||||
|  |         super().__init__() | ||||||
|  |         self.debounce_interval_seconds = debounce_interval_seconds | ||||||
|  |         self.events_callback = events_callback | ||||||
|  |  | ||||||
|  |         self._events = [] | ||||||
|  |         self._cond = threading.Condition() | ||||||
|  |  | ||||||
|  |     def handle_event(self, event): | ||||||
|  |         with self._cond: | ||||||
|  |             self._events.append(event) | ||||||
|  |             self._cond.notify() | ||||||
|  |  | ||||||
|  |     def stop(self): | ||||||
|  |         with self._cond: | ||||||
|  |             super().stop() | ||||||
|  |             self._cond.notify() | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         with self._cond: | ||||||
|  |             while True: | ||||||
|  |                 # Wait for first event (or shutdown). | ||||||
|  |                 self._cond.wait() | ||||||
|  |  | ||||||
|  |                 if self.debounce_interval_seconds: | ||||||
|  |                     # Wait for additional events (or shutdown) until the debounce interval passes. | ||||||
|  |                     while self.should_keep_running(): | ||||||
|  |                         if not self._cond.wait(timeout=self.debounce_interval_seconds): | ||||||
|  |                             break | ||||||
|  |  | ||||||
|  |                 if not self.should_keep_running(): | ||||||
|  |                     break | ||||||
|  |  | ||||||
|  |                 events = self._events | ||||||
|  |                 self._events = [] | ||||||
|  |                 self.events_callback(events) | ||||||
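Events passed to handle_event() within one debounce window are delivered to the callback as a single ordered batch. A rough sketch with made-up event strings and a 0.2 s window; the short sleep after start() simply gives the worker thread time to reach its wait() before events arrive:

    import time

    from libs.watchdog.utils.event_debouncer import EventDebouncer

    def on_events(batch):
        print(f"flushed {len(batch)} events: {batch}")

    debouncer = EventDebouncer(debounce_interval_seconds=0.2, events_callback=on_events)
    debouncer.start()
    time.sleep(0.1)                      # let the worker reach its wait()

    debouncer.handle_event("created")
    debouncer.handle_event("modified")   # lands in the same batch

    time.sleep(0.5)                      # window elapses, batch is flushed
    debouncer.stop()
    debouncer.join()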
							
								
								
									
95  src/libs/watchdog/utils/patterns.py  Normal file
							| @@ -0,0 +1,95 @@ | |||||||
|  | # patterns.py: Common wildcard searching/filtering functionality for files. | ||||||
|  | # | ||||||
|  | # Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # | ||||||
|  | # Written by Boris Staletic <boris.staletic@gmail.com> | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | # Non-pure path objects are only allowed on their respective OS's. | ||||||
|  | # Thus, these utilities require "pure" path objects that don't access the filesystem. | ||||||
|  | # Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it | ||||||
|  | # by converting input paths to `PureWindowsPath` and `PurePosixPath` where: | ||||||
|  | #   - `PureWindowsPath` is always case-insensitive. | ||||||
|  | #   - `PurePosixPath` is always case-sensitive. | ||||||
|  | # Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match | ||||||
|  | from pathlib import PurePosixPath, PureWindowsPath | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _match_path(path, included_patterns, excluded_patterns, case_sensitive): | ||||||
|  |     """Internal function same as :func:`match_path` but does not check arguments.""" | ||||||
|  |     if case_sensitive: | ||||||
|  |         path = PurePosixPath(path) | ||||||
|  |     else: | ||||||
|  |         included_patterns = {pattern.lower() for pattern in included_patterns} | ||||||
|  |         excluded_patterns = {pattern.lower() for pattern in excluded_patterns} | ||||||
|  |         path = PureWindowsPath(path) | ||||||
|  |  | ||||||
|  |     common_patterns = included_patterns & excluded_patterns | ||||||
|  |     if common_patterns: | ||||||
|  |         raise ValueError( | ||||||
|  |             "conflicting patterns `{}` included and excluded".format(common_patterns) | ||||||
|  |         ) | ||||||
|  |     return any(path.match(p) for p in included_patterns) and not any( | ||||||
|  |         path.match(p) for p in excluded_patterns | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def filter_paths( | ||||||
|  |     paths, included_patterns=None, excluded_patterns=None, case_sensitive=True | ||||||
|  | ): | ||||||
|  |     """ | ||||||
|  |     Filters from a set of paths based on acceptable patterns and | ||||||
|  |     ignorable patterns. | ||||||
|  |     :param paths: | ||||||
|  |         A list of path names that will be filtered based on matching and | ||||||
|  |         ignored patterns. | ||||||
|  |     :param included_patterns: | ||||||
|  |         Allow filenames matching wildcard patterns specified in this list. | ||||||
|  |         If no pattern list is specified, ["*"] is used as the default pattern, | ||||||
|  |         which matches all files. | ||||||
|  |     :param excluded_patterns: | ||||||
|  |         Ignores filenames matching wildcard patterns specified in this list. | ||||||
|  |         If no pattern list is specified, no files are ignored. | ||||||
|  |     :param case_sensitive: | ||||||
|  |         ``True`` if matching should be case-sensitive; ``False`` otherwise. | ||||||
|  |     :returns: | ||||||
|  |         A list of pathnames that matched the allowable patterns and passed | ||||||
|  |         through the ignored patterns. | ||||||
|  |     """ | ||||||
|  |     included = ["*"] if included_patterns is None else included_patterns | ||||||
|  |     excluded = [] if excluded_patterns is None else excluded_patterns | ||||||
|  |  | ||||||
|  |     for path in paths: | ||||||
|  |         if _match_path(path, set(included), set(excluded), case_sensitive): | ||||||
|  |             yield path | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def match_any_paths( | ||||||
|  |     paths, included_patterns=None, excluded_patterns=None, case_sensitive=True | ||||||
|  | ): | ||||||
|  |     """ | ||||||
|  |     Matches from a set of paths based on acceptable patterns and | ||||||
|  |     ignorable patterns. | ||||||
|  |     :param paths: | ||||||
|  |         A list of path names that will be filtered based on matching and | ||||||
|  |         ignored patterns. | ||||||
|  |     :param included_patterns: | ||||||
|  |         Allow filenames matching wildcard patterns specified in this list. | ||||||
|  |         If no pattern list is specified, ["*"] is used as the default pattern, | ||||||
|  |         which matches all files. | ||||||
|  |     :param excluded_patterns: | ||||||
|  |         Ignores filenames matching wildcard patterns specified in this list. | ||||||
|  |         If no pattern list is specified, no files are ignored. | ||||||
|  |     :param case_sensitive: | ||||||
|  |         ``True`` if matching should be case-sensitive; ``False`` otherwise. | ||||||
|  |     :returns: | ||||||
|  |         ``True`` if any of the paths matches; ``False`` otherwise. | ||||||
|  |     """ | ||||||
|  |     included = ["*"] if included_patterns is None else included_patterns | ||||||
|  |     excluded = [] if excluded_patterns is None else excluded_patterns | ||||||
|  |  | ||||||
|  |     for path in paths: | ||||||
|  |         if _match_path(path, set(included), set(excluded), case_sensitive): | ||||||
|  |             return True | ||||||
|  |     return False | ||||||
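filter_paths() yields the paths that pass the include/exclude patterns, while match_any_paths() only reports whether anything would pass. A sketch with made-up paths and patterns:

    from libs.watchdog.utils.patterns import filter_paths, match_any_paths

    paths = ["src/app.py", "src/app_test.py", "README.md"]

    kept = list(filter_paths(paths,
                             included_patterns=["*.py"],
                             excluded_patterns=["*_test.py"],
                             case_sensitive=True))
    print(kept)                                                 # ['src/app.py']

    print(match_any_paths(paths, included_patterns=["*.md"]))   # True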
							
								
								
									
57  src/libs/watchdog/utils/platform.py  Normal file
							| @@ -0,0 +1,57 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | PLATFORM_WINDOWS = "windows" | ||||||
|  | PLATFORM_LINUX = "linux" | ||||||
|  | PLATFORM_BSD = "bsd" | ||||||
|  | PLATFORM_DARWIN = "darwin" | ||||||
|  | PLATFORM_UNKNOWN = "unknown" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def get_platform_name(): | ||||||
|  |     if sys.platform.startswith("win"): | ||||||
|  |         return PLATFORM_WINDOWS | ||||||
|  |     elif sys.platform.startswith("darwin"): | ||||||
|  |         return PLATFORM_DARWIN | ||||||
|  |     elif sys.platform.startswith("linux"): | ||||||
|  |         return PLATFORM_LINUX | ||||||
|  |     elif sys.platform.startswith(("dragonfly", "freebsd", "netbsd", "openbsd", "bsd")): | ||||||
|  |         return PLATFORM_BSD | ||||||
|  |     else: | ||||||
|  |         return PLATFORM_UNKNOWN | ||||||
|  |  | ||||||
|  |  | ||||||
|  | __platform__ = get_platform_name() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_linux(): | ||||||
|  |     return __platform__ == PLATFORM_LINUX | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_bsd(): | ||||||
|  |     return __platform__ == PLATFORM_BSD | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_darwin(): | ||||||
|  |     return __platform__ == PLATFORM_DARWIN | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def is_windows(): | ||||||
|  |     return __platform__ == PLATFORM_WINDOWS | ||||||
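The helpers are thin wrappers over sys.platform, so choosing a code path per OS stays readable. A sketch, assuming the module is importable as libs.watchdog.utils.platform; the backend names printed here are placeholders, not watchdog API:

    from libs.watchdog.utils import platform

    if platform.is_linux():
        print("Linux: an inotify-style backend would be chosen here")
    elif platform.is_windows():
        print("Windows: a winapi-style backend would be chosen here")
    elif platform.is_darwin() or platform.is_bsd():
        print("macOS/BSD: a kqueue-style backend would be chosen here")
    else:
        print("unknown platform: falling back to a polling backend")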
							
								
								
									
26  src/libs/watchdog/utils/process_watcher.py  Normal file
							| @@ -0,0 +1,26 @@ | |||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  |  | ||||||
|  | from libs.watchdog.utils import BaseThread | ||||||
|  |  | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ProcessWatcher(BaseThread): | ||||||
|  |     def __init__(self, popen_obj, process_termination_callback): | ||||||
|  |         super().__init__() | ||||||
|  |         self.popen_obj = popen_obj | ||||||
|  |         self.process_termination_callback = process_termination_callback | ||||||
|  |  | ||||||
|  |     def run(self): | ||||||
|  |         while True: | ||||||
|  |             if self.popen_obj.poll() is not None: | ||||||
|  |                 break | ||||||
|  |             if self.stopped_event.wait(timeout=0.1): | ||||||
|  |                 return | ||||||
|  |  | ||||||
|  |         try: | ||||||
|  |             self.process_termination_callback() | ||||||
|  |         except Exception: | ||||||
|  |             logger.exception("Error calling process termination callback") | ||||||
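ProcessWatcher polls a Popen object every 0.1 s and fires the callback once the child exits, unless the watcher is stopped first. A sketch using a throwaway sleep child process (a Unix command; the timeout values are arbitrary):

    import subprocess

    from libs.watchdog.utils.process_watcher import ProcessWatcher

    child = subprocess.Popen(["sleep", "1"])

    watcher = ProcessWatcher(child, lambda: print("child exited"))
    watcher.start()

    child.wait()              # the watcher notices the exit and runs the callback
    watcher.join(timeout=2)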
							
								
								
									
27  src/libs/watchdog/version.py  Normal file
							| @@ -0,0 +1,27 @@ | |||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | # When updating this version number, please update the | ||||||
|  | # ``docs/source/global.rst.inc`` file as well. | ||||||
|  | VERSION_MAJOR = 3 | ||||||
|  | VERSION_MINOR = 0 | ||||||
|  | VERSION_BUILD = 0 | ||||||
|  | VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD) | ||||||
|  | VERSION_STRING = f"{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_BUILD}" | ||||||
|  |  | ||||||
|  | __version__ = VERSION_INFO | ||||||
							
								
								
									
808  src/libs/watchdog/watchmedo.py  Executable file
							| @@ -0,0 +1,808 @@ | |||||||
|  | # | ||||||
|  | # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> | ||||||
|  | # Copyright 2012 Google, Inc & contributors. | ||||||
|  | # | ||||||
|  | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | # you may not use this file except in compliance with the License. | ||||||
|  | # You may obtain a copy of the License at | ||||||
|  | # | ||||||
|  | #     http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  | # | ||||||
|  | # Unless required by applicable law or agreed to in writing, software | ||||||
|  | # distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | # See the License for the specific language governing permissions and | ||||||
|  | # limitations under the License. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | :module: libs.watchdog.watchmedo | ||||||
|  | :author: yesudeep@google.com (Yesudeep Mangalapilly) | ||||||
|  | :author: contact@tiger-222.fr (Mickaël Schoentgen) | ||||||
|  | :synopsis: ``watchmedo`` shell script utility. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from __future__ import annotations | ||||||
|  |  | ||||||
|  | import errno | ||||||
|  | import logging | ||||||
|  | import os | ||||||
|  | import os.path | ||||||
|  | import sys | ||||||
|  | import time | ||||||
|  | from argparse import ArgumentParser, RawDescriptionHelpFormatter | ||||||
|  | from io import StringIO | ||||||
|  | from textwrap import dedent | ||||||
|  | from typing import TYPE_CHECKING | ||||||
|  |  | ||||||
|  | from libs.watchdog.observers.api import BaseObserverSubclassCallable | ||||||
|  | from libs.watchdog.utils import WatchdogShutdown, load_class | ||||||
|  | from libs.watchdog.version import VERSION_STRING | ||||||
|  |  | ||||||
|  | logging.basicConfig(level=logging.INFO) | ||||||
|  |  | ||||||
|  | CONFIG_KEY_TRICKS = "tricks" | ||||||
|  | CONFIG_KEY_PYTHON_PATH = "python-path" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class HelpFormatter(RawDescriptionHelpFormatter): | ||||||
|  |     """A nicer help formatter. | ||||||
|  |  | ||||||
|  |     Help for arguments can be indented and contain new lines. | ||||||
|  |     It will be de-dented and arguments in the help | ||||||
|  |     will be separated by a blank line for better readability. | ||||||
|  |  | ||||||
|  |     Source: https://github.com/httpie/httpie/blob/2423f89/httpie/cli/argparser.py#L31 | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, *args, max_help_position=6, **kwargs): | ||||||
|  |         # A smaller indent for args help. | ||||||
|  |         kwargs["max_help_position"] = max_help_position | ||||||
|  |         super().__init__(*args, **kwargs) | ||||||
|  |  | ||||||
|  |     def _split_lines(self, text, width): | ||||||
|  |         text = dedent(text).strip() + "\n\n" | ||||||
|  |         return text.splitlines() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | epilog = """\ | ||||||
|  | Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>. | ||||||
|  | Copyright 2012 Google, Inc & contributors. | ||||||
|  |  | ||||||
|  | Licensed under the terms of the Apache license, version 2.0. Please see | ||||||
|  | LICENSE in the source code for more information.""" | ||||||
|  |  | ||||||
|  | cli = ArgumentParser(epilog=epilog, formatter_class=HelpFormatter) | ||||||
|  | cli.add_argument("--version", action="version", version=VERSION_STRING) | ||||||
|  | subparsers = cli.add_subparsers(dest="top_command") | ||||||
|  | command_parsers = {} | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def argument(*name_or_flags, **kwargs): | ||||||
|  |     """Convenience function to properly format arguments to pass to the | ||||||
|  |     command decorator. | ||||||
|  |     """ | ||||||
|  |     return list(name_or_flags), kwargs | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def command(args=[], parent=subparsers, cmd_aliases=[]): | ||||||
|  |     """Decorator to define a new command in a sanity-preserving way. | ||||||
|  |     The function will be stored in the ``func`` variable when the parser | ||||||
|  |     parses arguments so that it can be called directly like so:: | ||||||
|  |  | ||||||
|  |       >>> args = cli.parse_args() | ||||||
|  |       >>> args.func(args) | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def decorator(func): | ||||||
|  |         name = func.__name__.replace("_", "-") | ||||||
|  |         desc = dedent(func.__doc__) | ||||||
|  |         parser = parent.add_parser( | ||||||
|  |             name, description=desc, aliases=cmd_aliases, formatter_class=HelpFormatter | ||||||
|  |         ) | ||||||
|  |         command_parsers[name] = parser | ||||||
|  |         verbosity_group = parser.add_mutually_exclusive_group() | ||||||
|  |         verbosity_group.add_argument( | ||||||
|  |             "-q", "--quiet", dest="verbosity", action="append_const", const=-1 | ||||||
|  |         ) | ||||||
|  |         verbosity_group.add_argument( | ||||||
|  |             "-v", "--verbose", dest="verbosity", action="append_const", const=1 | ||||||
|  |         ) | ||||||
|  |         for arg in args: | ||||||
|  |             parser.add_argument(*arg[0], **arg[1]) | ||||||
|  |             parser.set_defaults(func=func) | ||||||
|  |         return func | ||||||
|  |  | ||||||
|  |     return decorator | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def path_split(pathname_spec, separator=os.pathsep): | ||||||
|  |     """ | ||||||
|  |     Splits a pathname specification separated by an OS-dependent separator. | ||||||
|  |  | ||||||
|  |     :param pathname_spec: | ||||||
|  |         The pathname specification. | ||||||
|  |     :param separator: | ||||||
|  |         (OS Dependent) `:` on Unix and `;` on Windows or user-specified. | ||||||
|  |     """ | ||||||
|  |     return list(pathname_spec.split(separator)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def add_to_sys_path(pathnames, index=0): | ||||||
|  |     """ | ||||||
|  |     Adds specified paths at specified index into the sys.path list. | ||||||
|  |  | ||||||
|  |     :param pathnames: | ||||||
|  |         A list of paths to add to the sys.path | ||||||
|  |     :param index: | ||||||
|  |         (Default 0) The index in the sys.path list where the paths will be | ||||||
|  |         added. | ||||||
|  |     """ | ||||||
|  |     for pathname in pathnames[::-1]: | ||||||
|  |         sys.path.insert(index, pathname) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_config(tricks_file_pathname): | ||||||
|  |     """ | ||||||
|  |     Loads the YAML configuration from the specified file. | ||||||
|  |  | ||||||
|  |     :param tricks_file_pathname: | ||||||
|  |         The path to the tricks configuration file. | ||||||
|  |     :returns: | ||||||
|  |         A dictionary of configuration information. | ||||||
|  |     """ | ||||||
|  |     import yaml | ||||||
|  |  | ||||||
|  |     with open(tricks_file_pathname, "rb") as f: | ||||||
|  |         return yaml.safe_load(f.read()) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def parse_patterns(patterns_spec, ignore_patterns_spec, separator=";"): | ||||||
|  |     """ | ||||||
|  |     Parses pattern argument specs and returns a two-tuple of | ||||||
|  |     (patterns, ignore_patterns). | ||||||
|  |     """ | ||||||
|  |     patterns = patterns_spec.split(separator) | ||||||
|  |     ignore_patterns = ignore_patterns_spec.split(separator) | ||||||
|  |     if ignore_patterns == [""]: | ||||||
|  |         ignore_patterns = [] | ||||||
|  |     return (patterns, ignore_patterns) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def observe_with(observer, event_handler, pathnames, recursive): | ||||||
|  |     """ | ||||||
|  |     Single observer thread with a scheduled path and event handler. | ||||||
|  |  | ||||||
|  |     :param observer: | ||||||
|  |         The observer thread. | ||||||
|  |     :param event_handler: | ||||||
|  |         Event handler which will be called in response to file system events. | ||||||
|  |     :param pathnames: | ||||||
|  |         A list of pathnames to monitor. | ||||||
|  |     :param recursive: | ||||||
|  |         ``True`` if recursive; ``False`` otherwise. | ||||||
|  |     """ | ||||||
|  |     for pathname in set(pathnames): | ||||||
|  |         observer.schedule(event_handler, pathname, recursive) | ||||||
|  |     observer.start() | ||||||
|  |     try: | ||||||
|  |         while True: | ||||||
|  |             time.sleep(1) | ||||||
|  |     except WatchdogShutdown: | ||||||
|  |         observer.stop() | ||||||
|  |     observer.join() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def schedule_tricks(observer, tricks, pathname, recursive): | ||||||
|  |     """ | ||||||
|  |     Schedules tricks with the specified observer and for the given watch | ||||||
|  |     path. | ||||||
|  |  | ||||||
|  |     :param observer: | ||||||
|  |         The observer thread into which to schedule the trick and watch. | ||||||
|  |     :param tricks: | ||||||
|  |         A list of tricks. | ||||||
|  |     :param pathname: | ||||||
|  |         A path name which should be watched. | ||||||
|  |     :param recursive: | ||||||
|  |         ``True`` if recursive; ``False`` otherwise. | ||||||
|  |     """ | ||||||
|  |     for trick in tricks: | ||||||
|  |         for name, value in list(trick.items()): | ||||||
|  |             TrickClass = load_class(name) | ||||||
|  |             handler = TrickClass(**value) | ||||||
|  |             trick_pathname = getattr(handler, "source_directory", None) or pathname | ||||||
|  |             observer.schedule(handler, trick_pathname, recursive) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @command( | ||||||
|  |     [ | ||||||
|  |         argument("files", nargs="*", help="perform tricks from given file"), | ||||||
|  |         argument( | ||||||
|  |             "--python-path", | ||||||
|  |             default=".", | ||||||
|  |             help=f"Paths separated by {os.pathsep!r} to add to the Python path.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--interval", | ||||||
|  |             "--timeout", | ||||||
|  |             dest="timeout", | ||||||
|  |             default=1.0, | ||||||
|  |             type=float, | ||||||
|  |             help="Use this as the polling interval/blocking timeout (in seconds).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--recursive", | ||||||
|  |             action="store_true", | ||||||
|  |             default=True, | ||||||
|  |             help="Recursively monitor paths (defaults to True).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-polling", action="store_true", help="[debug] Forces polling." | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-kqueue", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces BSD kqueue(2).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-winapi", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces Windows API.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-fsevents", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces macOS FSEvents.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-inotify", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces Linux inotify(7).", | ||||||
|  |         ), | ||||||
|  |     ], | ||||||
|  |     cmd_aliases=["tricks"], | ||||||
|  | ) | ||||||
|  | def tricks_from(args): | ||||||
|  |     """ | ||||||
|  |     Command to execute tricks from a tricks configuration file. | ||||||
|  |     """ | ||||||
|  |     Observer: BaseObserverSubclassCallable | ||||||
|  |     if args.debug_force_polling: | ||||||
|  |         from libs.watchdog.observers.polling import PollingObserver as Observer | ||||||
|  |     elif args.debug_force_kqueue: | ||||||
|  |         from libs.watchdog.observers.kqueue import KqueueObserver as Observer | ||||||
|  |     elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and sys.platform.startswith("win")): | ||||||
|  |         from libs.watchdog.observers.read_directory_changes import WindowsApiObserver as Observer | ||||||
|  |     elif args.debug_force_inotify: | ||||||
|  |         from libs.watchdog.observers.inotify import InotifyObserver as Observer | ||||||
|  |     elif args.debug_force_fsevents: | ||||||
|  |         from libs.watchdog.observers.fsevents import FSEventsObserver as Observer | ||||||
|  |     else: | ||||||
|  |         # Automatically picks the most appropriate observer for the platform | ||||||
|  |         # on which it is running. | ||||||
|  |         from libs.watchdog.observers import Observer | ||||||
|  |  | ||||||
|  |     add_to_sys_path(path_split(args.python_path)) | ||||||
|  |     observers = [] | ||||||
|  |     for tricks_file in args.files: | ||||||
|  |         observer = Observer(timeout=args.timeout) | ||||||
|  |  | ||||||
|  |         if not os.path.exists(tricks_file): | ||||||
|  |             raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file) | ||||||
|  |  | ||||||
|  |         config = load_config(tricks_file) | ||||||
|  |  | ||||||
|  |         try: | ||||||
|  |             tricks = config[CONFIG_KEY_TRICKS] | ||||||
|  |         except KeyError: | ||||||
|  |             raise KeyError( | ||||||
|  |                 f"No {CONFIG_KEY_TRICKS!r} key specified in {tricks_file!r}." | ||||||
|  |             ) | ||||||
|  |  | ||||||
|  |         if CONFIG_KEY_PYTHON_PATH in config: | ||||||
|  |             add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH]) | ||||||
|  |  | ||||||
|  |         dir_path = os.path.dirname(tricks_file) | ||||||
|  |         if not dir_path: | ||||||
|  |             dir_path = os.path.relpath(os.getcwd()) | ||||||
|  |         schedule_tricks(observer, tricks, dir_path, args.recursive) | ||||||
|  |         observer.start() | ||||||
|  |         observers.append(observer) | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         while True: | ||||||
|  |             time.sleep(1) | ||||||
|  |     except WatchdogShutdown: | ||||||
|  |         for o in observers: | ||||||
|  |             o.unschedule_all() | ||||||
|  |             o.stop() | ||||||
|  |     for o in observers: | ||||||
|  |         o.join() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @command( | ||||||
|  |     [ | ||||||
|  |         argument( | ||||||
|  |             "trick_paths", | ||||||
|  |             nargs="*", | ||||||
|  |             help="Dotted paths for all the tricks you want to generate.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--python-path", | ||||||
|  |             default=".", | ||||||
|  |             help=f"Paths separated by {os.pathsep!r} to add to the Python path.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--append-to-file", | ||||||
|  |             default=None, | ||||||
|  |             help=""" | ||||||
|  |                    Appends the generated tricks YAML to a file. | ||||||
|  |                    If not specified, prints to standard output.""", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-a", | ||||||
|  |             "--append-only", | ||||||
|  |             dest="append_only", | ||||||
|  |             action="store_true", | ||||||
|  |             help=""" | ||||||
|  |                    If --append-to-file is not specified, produces output for | ||||||
|  |                    appending instead of a complete tricks YAML file.""", | ||||||
|  |         ), | ||||||
|  |     ], | ||||||
|  |     cmd_aliases=["generate-tricks-yaml"], | ||||||
|  | ) | ||||||
|  | def tricks_generate_yaml(args): | ||||||
|  |     """ | ||||||
|  |     Command to generate YAML configuration for tricks named on the command line. | ||||||
|  |     """ | ||||||
|  |     import yaml | ||||||
|  |  | ||||||
|  |     python_paths = path_split(args.python_path) | ||||||
|  |     add_to_sys_path(python_paths) | ||||||
|  |     output = StringIO() | ||||||
|  |  | ||||||
|  |     for trick_path in args.trick_paths: | ||||||
|  |         TrickClass = load_class(trick_path) | ||||||
|  |         output.write(TrickClass.generate_yaml()) | ||||||
|  |  | ||||||
|  |     content = output.getvalue() | ||||||
|  |     output.close() | ||||||
|  |  | ||||||
|  |     header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths}) | ||||||
|  |     header += f"{CONFIG_KEY_TRICKS}:\n" | ||||||
|  |     if args.append_to_file is None: | ||||||
|  |         # Output to standard output. | ||||||
|  |         if not args.append_only: | ||||||
|  |             content = header + content | ||||||
|  |         sys.stdout.write(content) | ||||||
|  |     else: | ||||||
|  |         if not os.path.exists(args.append_to_file): | ||||||
|  |             content = header + content | ||||||
|  |         with open(args.append_to_file, "a", encoding="utf-8") as file: | ||||||
|  |             file.write(content) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @command( | ||||||
|  |     [ | ||||||
|  |         argument( | ||||||
|  |             "directories", | ||||||
|  |             nargs="*", | ||||||
|  |             default=".", | ||||||
|  |             help="Directories to watch. (default: '.').", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-p", | ||||||
|  |             "--pattern", | ||||||
|  |             "--patterns", | ||||||
|  |             dest="patterns", | ||||||
|  |             default="*", | ||||||
|  |             help="Matches event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-i", | ||||||
|  |             "--ignore-pattern", | ||||||
|  |             "--ignore-patterns", | ||||||
|  |             dest="ignore_patterns", | ||||||
|  |             default="", | ||||||
|  |             help="Ignores event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-D", | ||||||
|  |             "--ignore-directories", | ||||||
|  |             dest="ignore_directories", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Ignores events for directories.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-R", | ||||||
|  |             "--recursive", | ||||||
|  |             dest="recursive", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Monitors the directories recursively.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--interval", | ||||||
|  |             "--timeout", | ||||||
|  |             dest="timeout", | ||||||
|  |             default=1.0, | ||||||
|  |             type=float, | ||||||
|  |             help="Use this as the polling interval/blocking timeout.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--trace", action="store_true", help="Dumps complete dispatching trace." | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-polling", action="store_true", help="[debug] Forces polling." | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-kqueue", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces BSD kqueue(2).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-winapi", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces Windows API.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-fsevents", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces macOS FSEvents.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-inotify", | ||||||
|  |             action="store_true", | ||||||
|  |             help="[debug] Forces Linux inotify(7).", | ||||||
|  |         ), | ||||||
|  |     ] | ||||||
|  | ) | ||||||
|  | def log(args): | ||||||
|  |     """ | ||||||
|  |     Command to log file system events to the console. | ||||||
|  |     """ | ||||||
|  |     from libs.watchdog.tricks import LoggerTrick | ||||||
|  |     from libs.watchdog.utils import echo | ||||||
|  |  | ||||||
|  |     if args.trace: | ||||||
|  |         class_module_logger = logging.getLogger(LoggerTrick.__module__) | ||||||
|  |         echo.echo_class(LoggerTrick, write=lambda msg: class_module_logger.info(msg)) | ||||||
|  |  | ||||||
|  |     patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) | ||||||
|  |     handler = LoggerTrick( | ||||||
|  |         patterns=patterns, | ||||||
|  |         ignore_patterns=ignore_patterns, | ||||||
|  |         ignore_directories=args.ignore_directories, | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  |     Observer: BaseObserverSubclassCallable | ||||||
|  |     if args.debug_force_polling: | ||||||
|  |         from libs.watchdog.observers.polling import PollingObserver as Observer | ||||||
|  |     elif args.debug_force_kqueue: | ||||||
|  |         from libs.watchdog.observers.kqueue import KqueueObserver as Observer | ||||||
|  |     elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and sys.platform.startswith("win")): | ||||||
|  |         from libs.watchdog.observers.read_directory_changes import WindowsApiObserver as Observer | ||||||
|  |     elif args.debug_force_inotify: | ||||||
|  |         from libs.watchdog.observers.inotify import InotifyObserver as Observer | ||||||
|  |     elif args.debug_force_fsevents: | ||||||
|  |         from libs.watchdog.observers.fsevents import FSEventsObserver as Observer | ||||||
|  |     else: | ||||||
|  |         # Automatically picks the most appropriate observer for the platform | ||||||
|  |         # on which it is running. | ||||||
|  |         from libs.watchdog.observers import Observer | ||||||
|  |     observer = Observer(timeout=args.timeout) | ||||||
|  |     observe_with(observer, handler, args.directories, args.recursive) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @command( | ||||||
|  |     [ | ||||||
|  |         argument("directories", nargs="*", default=".", help="Directories to watch."), | ||||||
|  |         argument( | ||||||
|  |             "-c", | ||||||
|  |             "--command", | ||||||
|  |             dest="command", | ||||||
|  |             default=None, | ||||||
|  |             help=""" | ||||||
|  |     Shell command executed in response to matching events. | ||||||
|  |     These interpolation variables are available to your command string: | ||||||
|  |  | ||||||
|  |         ${watch_src_path}   - event source path | ||||||
|  |         ${watch_dest_path}  - event destination path (for moved events) | ||||||
|  |         ${watch_event_type} - event type | ||||||
|  |         ${watch_object}     - 'file' or 'directory' | ||||||
|  |  | ||||||
|  |     Note: | ||||||
|  |         Please ensure you do not use double quotes (") to quote | ||||||
|  |         your command string. That will force your shell to | ||||||
|  |         interpolate before the command is processed by this | ||||||
|  |         command. | ||||||
|  |  | ||||||
|  |     Example: | ||||||
|  |  | ||||||
|  |         --command='echo "${watch_src_path}"' | ||||||
|  |     """, | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-p", | ||||||
|  |             "--pattern", | ||||||
|  |             "--patterns", | ||||||
|  |             dest="patterns", | ||||||
|  |             default="*", | ||||||
|  |             help="Matches event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-i", | ||||||
|  |             "--ignore-pattern", | ||||||
|  |             "--ignore-patterns", | ||||||
|  |             dest="ignore_patterns", | ||||||
|  |             default="", | ||||||
|  |             help="Ignores event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-D", | ||||||
|  |             "--ignore-directories", | ||||||
|  |             dest="ignore_directories", | ||||||
|  |             default=False, | ||||||
|  |             action="store_true", | ||||||
|  |             help="Ignores events for directories.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-R", | ||||||
|  |             "--recursive", | ||||||
|  |             dest="recursive", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Monitors the directories recursively.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--interval", | ||||||
|  |             "--timeout", | ||||||
|  |             dest="timeout", | ||||||
|  |             default=1.0, | ||||||
|  |             type=float, | ||||||
|  |             help="Use this as the polling interval/blocking timeout.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-w", | ||||||
|  |             "--wait", | ||||||
|  |             dest="wait_for_process", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Wait for process to finish to avoid multiple simultaneous instances.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-W", | ||||||
|  |             "--drop", | ||||||
|  |             dest="drop_during_process", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Ignore events that occur while command is still being" | ||||||
|  |             " executed to avoid multiple simultaneous instances.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-polling", action="store_true", help="[debug] Forces polling." | ||||||
|  |         ), | ||||||
|  |     ] | ||||||
|  | ) | ||||||
|  | def shell_command(args): | ||||||
|  |     """ | ||||||
|  |     Command to execute shell commands in response to file system events. | ||||||
|  |     """ | ||||||
|  |     from libs.watchdog.tricks import ShellCommandTrick | ||||||
|  |  | ||||||
|  |     if not args.command: | ||||||
|  |         args.command = None | ||||||
|  |  | ||||||
|  |     Observer: BaseObserverSubclassCallable | ||||||
|  |     if args.debug_force_polling: | ||||||
|  |         from libs.watchdog.observers.polling import PollingObserver as Observer | ||||||
|  |     else: | ||||||
|  |         from libs.watchdog.observers import Observer | ||||||
|  |  | ||||||
|  |     patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) | ||||||
|  |     handler = ShellCommandTrick( | ||||||
|  |         shell_command=args.command, | ||||||
|  |         patterns=patterns, | ||||||
|  |         ignore_patterns=ignore_patterns, | ||||||
|  |         ignore_directories=args.ignore_directories, | ||||||
|  |         wait_for_process=args.wait_for_process, | ||||||
|  |         drop_during_process=args.drop_during_process, | ||||||
|  |     ) | ||||||
|  |     observer = Observer(timeout=args.timeout) | ||||||
|  |     observe_with(observer, handler, args.directories, args.recursive) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @command( | ||||||
|  |     [ | ||||||
|  |         argument("command", help="Long-running command to run in a subprocess."), | ||||||
|  |         argument( | ||||||
|  |             "command_args", | ||||||
|  |             metavar="arg", | ||||||
|  |             nargs="*", | ||||||
|  |             help=""" | ||||||
|  |     Command arguments. | ||||||
|  |  | ||||||
|  |     Note: Use -- before the command arguments, otherwise watchmedo will | ||||||
|  |     try to interpret them. | ||||||
|  |     """, | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-d", | ||||||
|  |             "--directory", | ||||||
|  |             dest="directories", | ||||||
|  |             metavar="DIRECTORY", | ||||||
|  |             action="append", | ||||||
|  |             help="Directory to watch. Use another -d or --directory option " | ||||||
|  |             "for each directory.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-p", | ||||||
|  |             "--pattern", | ||||||
|  |             "--patterns", | ||||||
|  |             dest="patterns", | ||||||
|  |             default="*", | ||||||
|  |             help="Matches event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-i", | ||||||
|  |             "--ignore-pattern", | ||||||
|  |             "--ignore-patterns", | ||||||
|  |             dest="ignore_patterns", | ||||||
|  |             default="", | ||||||
|  |             help="Ignores event paths with these patterns (separated by ;).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-D", | ||||||
|  |             "--ignore-directories", | ||||||
|  |             dest="ignore_directories", | ||||||
|  |             default=False, | ||||||
|  |             action="store_true", | ||||||
|  |             help="Ignores events for directories.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "-R", | ||||||
|  |             "--recursive", | ||||||
|  |             dest="recursive", | ||||||
|  |             action="store_true", | ||||||
|  |             help="Monitors the directories recursively.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--interval", | ||||||
|  |             "--timeout", | ||||||
|  |             dest="timeout", | ||||||
|  |             default=1.0, | ||||||
|  |             type=float, | ||||||
|  |             help="Use this as the polling interval/blocking timeout.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--signal", | ||||||
|  |             dest="signal", | ||||||
|  |             default="SIGINT", | ||||||
|  |             help="Stop the subprocess with this signal (default SIGINT).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debug-force-polling", action="store_true", help="[debug] Forces polling." | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--kill-after", | ||||||
|  |             dest="kill_after", | ||||||
|  |             default=10.0, | ||||||
|  |             type=float, | ||||||
|  |             help="When stopping, kill the subprocess after the specified timeout " | ||||||
|  |             "in seconds (default 10.0).", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--debounce-interval", | ||||||
|  |             dest="debounce_interval", | ||||||
|  |             default=0.0, | ||||||
|  |             type=float, | ||||||
|  |             help="After a file change, Wait until the specified interval (in " | ||||||
|  |             "seconds) passes with no file changes, and only then restart.", | ||||||
|  |         ), | ||||||
|  |         argument( | ||||||
|  |             "--no-restart-on-command-exit", | ||||||
|  |             dest="restart_on_command_exit", | ||||||
|  |             default=True, | ||||||
|  |             action="store_false", | ||||||
|  |             help="Don't auto-restart the command after it exits.", | ||||||
|  |         ), | ||||||
|  |     ] | ||||||
|  | ) | ||||||
|  | def auto_restart(args): | ||||||
|  |     """ | ||||||
|  |     Command to start a long-running subprocess and restart it on matched events. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     Observer: BaseObserverSubclassCallable | ||||||
|  |     if args.debug_force_polling: | ||||||
|  |         from libs.watchdog.observers.polling import PollingObserver as Observer | ||||||
|  |     else: | ||||||
|  |         from libs.watchdog.observers import Observer | ||||||
|  |  | ||||||
|  |     import signal | ||||||
|  |  | ||||||
|  |     from libs.watchdog.tricks import AutoRestartTrick | ||||||
|  |  | ||||||
|  |     if not args.directories: | ||||||
|  |         args.directories = ["."] | ||||||
|  |  | ||||||
|  |     # Allow either signal name or number. | ||||||
|  |     if args.signal.startswith("SIG"): | ||||||
|  |         stop_signal = getattr(signal, args.signal) | ||||||
|  |     else: | ||||||
|  |         stop_signal = int(args.signal) | ||||||
|  |  | ||||||
|  |     # Handle termination signals by raising a semantic exception which will | ||||||
|  |     # allow us to gracefully unwind and stop the observer | ||||||
|  |     termination_signals = {signal.SIGTERM, signal.SIGINT} | ||||||
|  |  | ||||||
|  |     if hasattr(signal, "SIGHUP"): | ||||||
|  |         termination_signals.add(signal.SIGHUP) | ||||||
|  |  | ||||||
|  |     def handler_termination_signal(_signum, _frame): | ||||||
|  |         # Neuter all signals so that we don't attempt a double shutdown | ||||||
|  |         for signum in termination_signals: | ||||||
|  |             signal.signal(signum, signal.SIG_IGN) | ||||||
|  |         raise WatchdogShutdown | ||||||
|  |  | ||||||
|  |     for signum in termination_signals: | ||||||
|  |         signal.signal(signum, handler_termination_signal) | ||||||
|  |  | ||||||
|  |     patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) | ||||||
|  |     command = [args.command] | ||||||
|  |     command.extend(args.command_args) | ||||||
|  |     handler = AutoRestartTrick( | ||||||
|  |         command=command, | ||||||
|  |         patterns=patterns, | ||||||
|  |         ignore_patterns=ignore_patterns, | ||||||
|  |         ignore_directories=args.ignore_directories, | ||||||
|  |         stop_signal=stop_signal, | ||||||
|  |         kill_after=args.kill_after, | ||||||
|  |         debounce_interval_seconds=args.debounce_interval, | ||||||
|  |         restart_on_command_exit=args.restart_on_command_exit, | ||||||
|  |     ) | ||||||
|  |     handler.start() | ||||||
|  |     observer = Observer(timeout=args.timeout) | ||||||
|  |     try: | ||||||
|  |         observe_with(observer, handler, args.directories, args.recursive) | ||||||
|  |     except WatchdogShutdown: | ||||||
|  |         pass | ||||||
|  |     finally: | ||||||
|  |         handler.stop() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class LogLevelException(Exception): | ||||||
|  |     pass | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _get_log_level_from_args(args): | ||||||
|  |     verbosity = sum(args.verbosity or []) | ||||||
|  |     if verbosity < -1: | ||||||
|  |         raise LogLevelException("-q/--quiet may be specified only once.") | ||||||
|  |     if verbosity > 2: | ||||||
|  |         raise LogLevelException("-v/--verbose may be specified up to 2 times.") | ||||||
|  |     return ["ERROR", "WARNING", "INFO", "DEBUG"][1 + verbosity] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def main(): | ||||||
|  |     """Entry-point function.""" | ||||||
|  |     args = cli.parse_args() | ||||||
|  |     if args.top_command is None: | ||||||
|  |         cli.print_help() | ||||||
|  |         return 1 | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         log_level = _get_log_level_from_args(args) | ||||||
|  |     except LogLevelException as exc: | ||||||
|  |         print(f"Error: {exc.args[0]}", file=sys.stderr) | ||||||
|  |         command_parsers[args.top_command].print_help() | ||||||
|  |         return 1 | ||||||
|  |     logging.getLogger("libs.watchdog").setLevel(log_level) | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         args.func(args) | ||||||
|  |     except KeyboardInterrupt: | ||||||
|  |         return 130 | ||||||
|  |  | ||||||
|  |     return 0 | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     sys.exit(main()) | ||||||
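
The CLI above is wired together by `cli.parse_args()` plus the `func` default that the `command` decorator sets on each sub-parser. A minimal sketch of that dispatch, assuming the project's `src/` directory is on `sys.path` so the vendored `libs.watchdog` package is importable (this sketch is an editor's illustration, not part of the commit):

    # Programmatic use of the vendored watchmedo CLI (sketch).
    from libs.watchdog import watchmedo

    # Equivalent to: watchmedo log --patterns="*.py" --recursive .
    args = watchmedo.cli.parse_args(["log", "--patterns=*.py", "--recursive", "."])
    args.func(args)  # dispatches to the log() command defined above; blocks until shutdown
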
| @@ -1,114 +0,0 @@ | |||||||
| # Python imports |  | ||||||
| import os |  | ||||||
| import threading |  | ||||||
| import time |  | ||||||
| from multiprocessing.connection import Client |  | ||||||
| from multiprocessing.connection import Listener |  | ||||||
|  |  | ||||||
| # Lib imports |  | ||||||
|  |  | ||||||
| # Application imports |  | ||||||
| from .singleton import Singleton |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class IPCServer(Singleton): |  | ||||||
|     """ Create a listener so that other {app_name} instances send requests back to existing instance. """ |  | ||||||
|     def __init__(self, ipc_address: str = '127.0.0.1', conn_type: str = "socket"): |  | ||||||
|         self.is_ipc_alive     = False |  | ||||||
|         self._ipc_port        = 4848 |  | ||||||
|         self._ipc_address     = ipc_address |  | ||||||
|         self._conn_type       = conn_type |  | ||||||
|         self._ipc_authkey     = b'' + bytes(f'{app_name}-ipc', 'utf-8') |  | ||||||
|         self._ipc_timeout     = 15.0 |  | ||||||
|  |  | ||||||
|         if conn_type == "socket": |  | ||||||
|             self._ipc_address = f'/tmp/{app_name}-ipc.sock' |  | ||||||
|         elif conn_type == "full_network": |  | ||||||
|             self._ipc_address = '0.0.0.0' |  | ||||||
|         elif conn_type == "full_network_unsecured": |  | ||||||
|             self._ipc_authkey = None |  | ||||||
|             self._ipc_address = '0.0.0.0' |  | ||||||
|         elif conn_type == "local_network_unsecured": |  | ||||||
|             self._ipc_authkey = None |  | ||||||
|  |  | ||||||
|         self._subscribe_to_events() |  | ||||||
|  |  | ||||||
|     def _subscribe_to_events(self): |  | ||||||
|         event_system.subscribe("post_file_to_ipc", self.send_ipc_message) |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     def create_ipc_listener(self) -> None: |  | ||||||
|         if self._conn_type == "socket": |  | ||||||
|             if os.path.exists(self._ipc_address) and settings_manager.is_dirty_start(): |  | ||||||
|                 os.unlink(self._ipc_address) |  | ||||||
|  |  | ||||||
|             listener = Listener(address=self._ipc_address, family="AF_UNIX", authkey=self._ipc_authkey) |  | ||||||
|         elif "unsecured" not in self._conn_type: |  | ||||||
|             listener = Listener((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey) |  | ||||||
|         else: |  | ||||||
|             listener = Listener((self._ipc_address, self._ipc_port)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
|         self.is_ipc_alive = True |  | ||||||
|         self._run_ipc_loop(listener) |  | ||||||
|  |  | ||||||
|     @daemon_threaded |  | ||||||
|     def _run_ipc_loop(self, listener) -> None: |  | ||||||
|         # NOTE: Not thread safe if using with Gtk. Need to import GLib and use idle_add |  | ||||||
|         while True: |  | ||||||
|             try: |  | ||||||
|                 conn       = listener.accept() |  | ||||||
|                 start_time = time.perf_counter() |  | ||||||
|                 self._handle_ipc_message(conn, start_time) |  | ||||||
|             except Exception as e: |  | ||||||
|                 ... |  | ||||||
|  |  | ||||||
|         listener.close() |  | ||||||
|  |  | ||||||
|     def _handle_ipc_message(self, conn, start_time) -> None: |  | ||||||
|         while True: |  | ||||||
|             msg = conn.recv() |  | ||||||
|             if settings_manager.is_debug(): |  | ||||||
|                 print(msg) |  | ||||||
|  |  | ||||||
|             if "FILE|" in msg: |  | ||||||
|                 file = msg.split("FILE|")[1].strip() |  | ||||||
|                 if file: |  | ||||||
|                     event_system.emit("handle_file_from_ipc", file) |  | ||||||
|  |  | ||||||
|             if "DIR|" in msg: |  | ||||||
|                 file = msg.split("DIR|")[1].strip() |  | ||||||
|                 if file: |  | ||||||
|                     event_system.emit("handle_dir_from_ipc", file) |  | ||||||
|  |  | ||||||
|                 conn.close() |  | ||||||
|                 break |  | ||||||
|  |  | ||||||
|  |  | ||||||
|             if msg in ['close connection', 'close server']: |  | ||||||
|                 conn.close() |  | ||||||
|                 break |  | ||||||
|  |  | ||||||
|             # NOTE: Not perfect but ensures we don't lock up the connection for too long. |  | ||||||
|             end_time = time.perf_counter() |  | ||||||
|             if (end_time - start_time) > self._ipc_timeout: |  | ||||||
|                 conn.close() |  | ||||||
|                 break |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     def send_ipc_message(self, message: str = "Empty Data...") -> None: |  | ||||||
|         try: |  | ||||||
|             if self._conn_type == "socket": |  | ||||||
|                 conn = Client(address=self._ipc_address, family="AF_UNIX", authkey=self._ipc_authkey) |  | ||||||
|             elif "unsecured" not in self._conn_type: |  | ||||||
|                 conn = Client((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey) |  | ||||||
|             else: |  | ||||||
|                 conn = Client((self._ipc_address, self._ipc_port)) |  | ||||||
|  |  | ||||||
|             conn.send(message) |  | ||||||
|             conn.close() |  | ||||||
|         except ConnectionRefusedError as e: |  | ||||||
|             print("Connection refused...") |  | ||||||
|         except Exception as e: |  | ||||||
|             print(repr(e)) |  | ||||||
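
For context on what was removed: a second application instance used to hand its path argument to the already-running instance over this listener. A minimal reconstruction from the deleted `send_ipc_message`, with `app_name` shown as a placeholder for the global the project injects elsewhere (again an editor's sketch, not code from this commit):

    # Sketch of the retired instance-to-instance hand-off.
    from multiprocessing.connection import Client

    app_name = "example-app"  # placeholder; the real value is provided globally by the app

    conn = Client(
        address=f"/tmp/{app_name}-ipc.sock",    # the default "socket" conn_type
        family="AF_UNIX",
        authkey=bytes(f"{app_name}-ipc", "utf-8"),
    )
    conn.send("FILE|/path/to/opened/file")      # "FILE|<path>" was parsed by _handle_ipc_message
    conn.close()
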