Added Singleton class to inherit as needed
parent 4b69622cc6
commit ca61d5348f
@@ -1,20 +0,0 @@
#!/bin/bash

# . CONFIG.sh

# set -o xtrace ## To debug scripts
# set -o errexit ## To exit on error
# set -o errunset ## To exit if a variable is referenced but not set


function main() {
    cd "$(dirname "")"
    echo "Working Dir: " $(pwd)

    TARGETDIR="${1}"
    LINK=`xclip -selection clipboard -o`

    cd "${TARGETDIR}"
    git clone "${LINK}"
}
main "$@";
351  plugins/git_clone/pexpect/ANSI.py  Normal file
@@ -0,0 +1,351 @@
|
||||
'''This implements an ANSI (VT100) terminal emulator as a subclass of screen.
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
# references:
|
||||
# http://en.wikipedia.org/wiki/ANSI_escape_code
|
||||
# http://www.retards.org/terminals/vt102.html
|
||||
# http://vt100.net/docs/vt102-ug/contents.html
|
||||
# http://vt100.net/docs/vt220-rm/
|
||||
# http://www.termsys.demon.co.uk/vtansi.htm
|
||||
|
||||
from . import screen
|
||||
from . import FSM
|
||||
import string
|
||||
|
||||
#
|
||||
# The 'Do.*' functions are helper functions for the ANSI class.
|
||||
#
|
||||
def DoEmit (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.write_ch(fsm.input_symbol)
|
||||
|
||||
def DoStartNumber (fsm):
|
||||
|
||||
fsm.memory.append (fsm.input_symbol)
|
||||
|
||||
def DoBuildNumber (fsm):
|
||||
|
||||
ns = fsm.memory.pop()
|
||||
ns = ns + fsm.input_symbol
|
||||
fsm.memory.append (ns)
|
||||
|
||||
def DoBackOne (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_back ()
|
||||
|
||||
def DoBack (fsm):
|
||||
|
||||
count = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_back (count)
|
||||
|
||||
def DoDownOne (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_down ()
|
||||
|
||||
def DoDown (fsm):
|
||||
|
||||
count = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_down (count)
|
||||
|
||||
def DoForwardOne (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_forward ()
|
||||
|
||||
def DoForward (fsm):
|
||||
|
||||
count = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_forward (count)
|
||||
|
||||
def DoUpReverse (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_up_reverse()
|
||||
|
||||
def DoUpOne (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_up ()
|
||||
|
||||
def DoUp (fsm):
|
||||
|
||||
count = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_up (count)
|
||||
|
||||
def DoHome (fsm):
|
||||
|
||||
c = int(fsm.memory.pop())
|
||||
r = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_home (r,c)
|
||||
|
||||
def DoHomeOrigin (fsm):
|
||||
|
||||
c = 1
|
||||
r = 1
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_home (r,c)
|
||||
|
||||
def DoEraseDown (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.erase_down()
|
||||
|
||||
def DoErase (fsm):
|
||||
|
||||
arg = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
if arg == 0:
|
||||
screen.erase_down()
|
||||
elif arg == 1:
|
||||
screen.erase_up()
|
||||
elif arg == 2:
|
||||
screen.erase_screen()
|
||||
|
||||
def DoEraseEndOfLine (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.erase_end_of_line()
|
||||
|
||||
def DoEraseLine (fsm):
|
||||
|
||||
arg = int(fsm.memory.pop())
|
||||
screen = fsm.memory[0]
|
||||
if arg == 0:
|
||||
screen.erase_end_of_line()
|
||||
elif arg == 1:
|
||||
screen.erase_start_of_line()
|
||||
elif arg == 2:
|
||||
screen.erase_line()
|
||||
|
||||
def DoEnableScroll (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.scroll_screen()
|
||||
|
||||
def DoCursorSave (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_save_attrs()
|
||||
|
||||
def DoCursorRestore (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
screen.cursor_restore_attrs()
|
||||
|
||||
def DoScrollRegion (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
r2 = int(fsm.memory.pop())
|
||||
r1 = int(fsm.memory.pop())
|
||||
screen.scroll_screen_rows (r1,r2)
|
||||
|
||||
def DoMode (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
mode = fsm.memory.pop() # Should be 4
|
||||
# screen.setReplaceMode ()
|
||||
|
||||
def DoLog (fsm):
|
||||
|
||||
screen = fsm.memory[0]
|
||||
fsm.memory = [screen]
|
||||
fout = open ('log', 'a')
|
||||
fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
|
||||
fout.close()
|
||||
|
||||
class term (screen.screen):
|
||||
|
||||
'''This class is an abstract, generic terminal.
|
||||
This does nothing. This is a placeholder that
|
||||
provides a common base class for other terminals
|
||||
such as an ANSI terminal. '''
|
||||
|
||||
def __init__ (self, r=24, c=80, *args, **kwargs):
|
||||
|
||||
screen.screen.__init__(self, r,c,*args,**kwargs)
|
||||
|
||||
class ANSI (term):
|
||||
'''This class implements an ANSI (VT100) terminal.
|
||||
It is a stream filter that recognizes ANSI terminal
|
||||
escape sequences and maintains the state of a screen object. '''
|
||||
|
||||
def __init__ (self, r=24,c=80,*args,**kwargs):
|
||||
|
||||
term.__init__(self,r,c,*args,**kwargs)
|
||||
|
||||
#self.screen = screen (24,80)
|
||||
self.state = FSM.FSM ('INIT',[self])
|
||||
self.state.set_default_transition (DoLog, 'INIT')
|
||||
self.state.add_transition_any ('INIT', DoEmit, 'INIT')
|
||||
self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
|
||||
self.state.add_transition_any ('ESC', DoLog, 'INIT')
|
||||
self.state.add_transition ('(', 'ESC', None, 'G0SCS')
|
||||
self.state.add_transition (')', 'ESC', None, 'G1SCS')
|
||||
self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
|
||||
self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
|
||||
self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
|
||||
self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
|
||||
self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
|
||||
self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
|
||||
self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
|
||||
self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
|
||||
self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
|
||||
self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
|
||||
self.state.add_transition ('[', 'ESC', None, 'ELB')
|
||||
# ELB means Escape Left Bracket. That is ^[[
|
||||
self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
|
||||
self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
|
||||
self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
|
||||
self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
|
||||
self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
|
||||
self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
|
||||
self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
|
||||
self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
|
||||
self.state.add_transition ('m', 'ELB', self.do_sgr, 'INIT')
|
||||
self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
|
||||
self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
|
||||
self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
|
||||
self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
|
||||
self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
|
||||
self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
|
||||
self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
|
||||
self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
|
||||
self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
|
||||
self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
|
||||
### It gets worse... the 'm' code can have infinite number of
|
||||
### number;number;number before it. I've never seen more than two,
|
||||
### but the specs say it's allowed. crap!
|
||||
self.state.add_transition ('m', 'NUMBER_1', self.do_sgr, 'INIT')
|
||||
### LED control. Same implementation problem as 'm' code.
|
||||
self.state.add_transition ('q', 'NUMBER_1', self.do_decsca, 'INIT')
|
||||
|
||||
# \E[?47h switch to alternate screen
|
||||
# \E[?47l restores to normal screen from alternate screen.
|
||||
self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
|
||||
self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
|
||||
self.state.add_transition ('l', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
|
||||
self.state.add_transition ('h', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
|
||||
|
||||
#RM Reset Mode Esc [ Ps l none
|
||||
self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
|
||||
self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
|
||||
self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
|
||||
self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
|
||||
self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
|
||||
self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
|
||||
self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
|
||||
self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
|
||||
### It gets worse... the 'm' code can have infinite number of
|
||||
### number;number;number before it. I've never seen more than two,
|
||||
### but the specs say it's allowed. crap!
|
||||
self.state.add_transition ('m', 'NUMBER_2', self.do_sgr, 'INIT')
|
||||
### LED control. Same problem as 'm' code.
|
||||
self.state.add_transition ('q', 'NUMBER_2', self.do_decsca, 'INIT')
|
||||
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
|
||||
|
||||
# Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
|
||||
self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
|
||||
self.state.add_transition_list (string.digits, 'SEMICOLON_X', DoStartNumber, 'NUMBER_X')
|
||||
self.state.add_transition_list (string.digits, 'NUMBER_X', DoBuildNumber, 'NUMBER_X')
|
||||
self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
|
||||
self.state.add_transition ('m', 'NUMBER_X', self.do_sgr, 'INIT')
|
||||
self.state.add_transition ('q', 'NUMBER_X', self.do_decsca, 'INIT')
|
||||
self.state.add_transition (';', 'NUMBER_X', None, 'SEMICOLON_X')
|
||||
|
||||
def process (self, c):
|
||||
"""Process a single character. Called by :meth:`write`."""
|
||||
if isinstance(c, bytes):
|
||||
c = self._decode(c)
|
||||
self.state.process(c)
|
||||
|
||||
def process_list (self, l):
|
||||
|
||||
self.write(l)
|
||||
|
||||
def write (self, s):
|
||||
"""Process text, writing it to the virtual screen while handling
|
||||
ANSI escape codes.
|
||||
"""
|
||||
if isinstance(s, bytes):
|
||||
s = self._decode(s)
|
||||
for c in s:
|
||||
self.process(c)
|
||||
|
||||
def flush (self):
|
||||
pass
|
||||
|
||||
def write_ch (self, ch):
|
||||
'''This puts a character at the current cursor position. The cursor
|
||||
position is moved forward with wrap-around, but no scrolling is done if
|
||||
the cursor hits the lower-right corner of the screen. '''
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
#\r and \n both produce a call to cr() and lf(), respectively.
|
||||
ch = ch[0]
|
||||
|
||||
if ch == u'\r':
|
||||
self.cr()
|
||||
return
|
||||
if ch == u'\n':
|
||||
self.crlf()
|
||||
return
|
||||
if ch == chr(screen.BS):
|
||||
self.cursor_back()
|
||||
return
|
||||
self.put_abs(self.cur_r, self.cur_c, ch)
|
||||
old_r = self.cur_r
|
||||
old_c = self.cur_c
|
||||
self.cursor_forward()
|
||||
if old_c == self.cur_c:
|
||||
self.cursor_down()
|
||||
if old_r != self.cur_r:
|
||||
self.cursor_home (self.cur_r, 1)
|
||||
else:
|
||||
self.scroll_up ()
|
||||
self.cursor_home (self.cur_r, 1)
|
||||
self.erase_line()
|
||||
|
||||
def do_sgr (self, fsm):
|
||||
'''Select Graphic Rendition, e.g. color. '''
|
||||
screen = fsm.memory[0]
|
||||
fsm.memory = [screen]
|
||||
|
||||
def do_decsca (self, fsm):
|
||||
'''Select character protection attribute. '''
|
||||
screen = fsm.memory[0]
|
||||
fsm.memory = [screen]
|
||||
|
||||
def do_modecrap (self, fsm):
|
||||
'''Handler for \x1b[?<number>h and \x1b[?<number>l. If anyone
|
||||
wanted to actually use these, they'd need to add more states to the
|
||||
FSM rather than just improve or override this method. '''
|
||||
screen = fsm.memory[0]
|
||||
fsm.memory = [screen]
|
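A minimal usage sketch of the ANSI class above, assuming the vendored package is importable as pexpect and that the companion screen module (not part of this hunk) renders its contents via str():

from pexpect import ANSI

term = ANSI.ANSI(r=24, c=80)        # a 24x80 virtual screen
term.write('hello\x1b[2Jworld')     # ESC [ 2 J is handled by DoErase() and clears the screen
print(str(term))                    # dump the emulated screen contents
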
334  plugins/git_clone/pexpect/FSM.py  Normal file
@@ -0,0 +1,334 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
'''This module implements a Finite State Machine (FSM). In addition to state
|
||||
this FSM also maintains a user defined "memory". So this FSM can be used as a
|
||||
Push-down Automaton (PDA), since a PDA is an FSM + memory.
|
||||
|
||||
The following describes how the FSM works, but you will probably also need to
|
||||
see the example function to understand how the FSM is used in practice.
|
||||
|
||||
You define an FSM by building tables of transitions. For a given input symbol
|
||||
the process() method uses these tables to decide what action to call and what
|
||||
the next state will be. The FSM has a table of transitions that associate:
|
||||
|
||||
(input_symbol, current_state) --> (action, next_state)
|
||||
|
||||
Where "action" is a function you define. The symbols and states can be any
|
||||
objects. You use the add_transition() and add_transition_list() methods to add
|
||||
to the transition table. The FSM also has a table of transitions that
|
||||
associate:
|
||||
|
||||
(current_state) --> (action, next_state)
|
||||
|
||||
You use the add_transition_any() method to add to this transition table. The
|
||||
FSM also has one default transition that is not associated with any specific
|
||||
input_symbol or state. You use the set_default_transition() method to set the
|
||||
default transition.
|
||||
|
||||
When an action function is called it is passed a reference to the FSM. The
|
||||
action function may then access attributes of the FSM such as input_symbol,
|
||||
current_state, or "memory". The "memory" attribute can be any object that you
|
||||
want to pass along to the action functions. It is not used by the FSM itself.
|
||||
For parsing you would typically pass a list to be used as a stack.
|
||||
|
||||
The processing sequence is as follows. The process() method is given an
|
||||
input_symbol to process. The FSM will search the table of transitions that
|
||||
associate:
|
||||
|
||||
(input_symbol, current_state) --> (action, next_state)
|
||||
|
||||
If the pair (input_symbol, current_state) is found then process() will call the
|
||||
associated action function and then set the current state to the next_state.
|
||||
|
||||
If the FSM cannot find a match for (input_symbol, current_state) it will then
|
||||
search the table of transitions that associate:
|
||||
|
||||
(current_state) --> (action, next_state)
|
||||
|
||||
If the current_state is found then the process() method will call the
|
||||
associated action function and then set the current state to the next_state.
|
||||
Notice that this table lacks an input_symbol. It lets you define transitions
|
||||
for a current_state and ANY input_symbol. Hence, it is called the "any" table.
|
||||
Remember, it is always checked after first searching the table for a specific
|
||||
(input_symbol, current_state).
|
||||
|
||||
For the case where the FSM did not match either of the previous two cases the
|
||||
FSM will try to use the default transition. If the default transition is
|
||||
defined then the process() method will call the associated action function and
|
||||
then set the current state to the next_state. This lets you define a default
|
||||
transition as a catch-all case. You can think of it as an exception handler.
|
||||
There can be only one default transition.
|
||||
|
||||
Finally, if none of the previous cases are defined for an input_symbol and
|
||||
current_state then the FSM will raise an exception. This may be desirable, but
|
||||
you can always prevent this just by defining a default transition.
|
||||
|
||||
Noah Spurrier 20020822
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
class ExceptionFSM(Exception):
|
||||
|
||||
'''This is the FSM Exception class.'''
|
||||
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def __str__(self):
|
||||
return 'ExceptionFSM: ' + str(self.value)
|
||||
|
||||
class FSM:
|
||||
|
||||
'''This is a Finite State Machine (FSM).
|
||||
'''
|
||||
|
||||
def __init__(self, initial_state, memory=None):
|
||||
|
||||
'''This creates the FSM. You set the initial state here. The "memory"
|
||||
attribute is any object that you want to pass along to the action
|
||||
functions. It is not used by the FSM. For parsing you would typically
|
||||
pass a list to be used as a stack. '''
|
||||
|
||||
# Map (input_symbol, current_state) --> (action, next_state).
|
||||
self.state_transitions = {}
|
||||
# Map (current_state) --> (action, next_state).
|
||||
self.state_transitions_any = {}
|
||||
self.default_transition = None
|
||||
|
||||
self.input_symbol = None
|
||||
self.initial_state = initial_state
|
||||
self.current_state = self.initial_state
|
||||
self.next_state = None
|
||||
self.action = None
|
||||
self.memory = memory
|
||||
|
||||
def reset (self):
|
||||
|
||||
'''This sets the current_state to the initial_state and sets
|
||||
input_symbol to None. The initial state was set by the constructor
|
||||
__init__(). '''
|
||||
|
||||
self.current_state = self.initial_state
|
||||
self.input_symbol = None
|
||||
|
||||
def add_transition (self, input_symbol, state, action=None, next_state=None):
|
||||
|
||||
'''This adds a transition that associates:
|
||||
|
||||
(input_symbol, current_state) --> (action, next_state)
|
||||
|
||||
The action may be set to None in which case the process() method will
|
||||
ignore the action and only set the next_state. The next_state may be
|
||||
set to None in which case the current state will be unchanged.
|
||||
|
||||
You can also set transitions for a list of symbols by using
|
||||
add_transition_list(). '''
|
||||
|
||||
if next_state is None:
|
||||
next_state = state
|
||||
self.state_transitions[(input_symbol, state)] = (action, next_state)
|
||||
|
||||
def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
|
||||
|
||||
'''This adds the same transition for a list of input symbols.
|
||||
You can pass a list or a string. Note that it is handy to use
|
||||
string.digits, string.whitespace, string.letters, etc. to add
|
||||
transitions that match character classes.
|
||||
|
||||
The action may be set to None in which case the process() method will
|
||||
ignore the action and only set the next_state. The next_state may be
|
||||
set to None in which case the current state will be unchanged. '''
|
||||
|
||||
if next_state is None:
|
||||
next_state = state
|
||||
for input_symbol in list_input_symbols:
|
||||
self.add_transition (input_symbol, state, action, next_state)
|
||||
|
||||
def add_transition_any (self, state, action=None, next_state=None):
|
||||
|
||||
'''This adds a transition that associates:
|
||||
|
||||
(current_state) --> (action, next_state)
|
||||
|
||||
That is, any input symbol will match the current state.
|
||||
The process() method checks the "any" state associations after it first
|
||||
checks for an exact match of (input_symbol, current_state).
|
||||
|
||||
The action may be set to None in which case the process() method will
|
||||
ignore the action and only set the next_state. The next_state may be
|
||||
set to None in which case the current state will be unchanged. '''
|
||||
|
||||
if next_state is None:
|
||||
next_state = state
|
||||
self.state_transitions_any [state] = (action, next_state)
|
||||
|
||||
def set_default_transition (self, action, next_state):
|
||||
|
||||
'''This sets the default transition. This defines an action and
|
||||
next_state if the FSM cannot find the input symbol and the current
|
||||
state in the transition list and if the FSM cannot find the
|
||||
current_state in the transition_any list. This is useful as a final
|
||||
fall-through state for catching errors and undefined states.
|
||||
|
||||
The default transition can be removed by setting the attribute
|
||||
default_transition to None. '''
|
||||
|
||||
self.default_transition = (action, next_state)
|
||||
|
||||
def get_transition (self, input_symbol, state):
|
||||
|
||||
'''This returns (action, next state) given an input_symbol and state.
|
||||
This does not modify the FSM state, so calling this method has no side
|
||||
effects. Normally you do not call this method directly. It is called by
|
||||
process().
|
||||
|
||||
The sequence of steps to check for a defined transition goes from the
|
||||
most specific to the least specific.
|
||||
|
||||
1. Check state_transitions[] that match exactly the tuple,
|
||||
(input_symbol, state)
|
||||
|
||||
2. Check state_transitions_any[] that match (state)
|
||||
In other words, match a specific state and ANY input_symbol.
|
||||
|
||||
3. Check if the default_transition is defined.
|
||||
This catches any input_symbol and any state.
|
||||
This is a handler for errors, undefined states, or defaults.
|
||||
|
||||
4. No transition was defined. If we get here then raise an exception.
|
||||
'''
|
||||
|
||||
if (input_symbol, state) in self.state_transitions:
|
||||
return self.state_transitions[(input_symbol, state)]
|
||||
elif state in self.state_transitions_any:
|
||||
return self.state_transitions_any[state]
|
||||
elif self.default_transition is not None:
|
||||
return self.default_transition
|
||||
else:
|
||||
raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
|
||||
(str(input_symbol), str(state)) )
|
||||
|
||||
def process (self, input_symbol):
|
||||
|
||||
'''This is the main method that you call to process input. This may
|
||||
cause the FSM to change state and call an action. This method calls
|
||||
get_transition() to find the action and next_state associated with the
|
||||
input_symbol and current_state. If the action is None then the action
|
||||
is not called and only the current state is changed. This method
|
||||
processes one complete input symbol. You can process a list of symbols
|
||||
(or a string) by calling process_list(). '''
|
||||
|
||||
self.input_symbol = input_symbol
|
||||
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
|
||||
if self.action is not None:
|
||||
self.action (self)
|
||||
self.current_state = self.next_state
|
||||
self.next_state = None
|
||||
|
||||
def process_list (self, input_symbols):
|
||||
|
||||
'''This takes a list and sends each element to process(). The list may
|
||||
be a string or any iterable object. '''
|
||||
|
||||
for s in input_symbols:
|
||||
self.process (s)
|
||||
|
||||
##############################################################################
|
||||
# The following is an example that demonstrates the use of the FSM class to
|
||||
# process an RPN expression. Run this module from the command line. You will
|
||||
# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
|
||||
# Operators are * / + - Use the = sign to evaluate and print the expression.
|
||||
# For example:
|
||||
#
|
||||
# 167 3 2 2 * * * 1 - =
|
||||
#
|
||||
# will print:
|
||||
#
|
||||
# 2003
|
||||
##############################################################################
|
||||
|
||||
import sys
|
||||
import string
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
|
||||
#
|
||||
# These define the actions.
|
||||
# Note that "memory" is a list being used as a stack.
|
||||
#
|
||||
|
||||
def BeginBuildNumber (fsm):
|
||||
fsm.memory.append (fsm.input_symbol)
|
||||
|
||||
def BuildNumber (fsm):
|
||||
s = fsm.memory.pop ()
|
||||
s = s + fsm.input_symbol
|
||||
fsm.memory.append (s)
|
||||
|
||||
def EndBuildNumber (fsm):
|
||||
s = fsm.memory.pop ()
|
||||
fsm.memory.append (int(s))
|
||||
|
||||
def DoOperator (fsm):
|
||||
ar = fsm.memory.pop()
|
||||
al = fsm.memory.pop()
|
||||
if fsm.input_symbol == '+':
|
||||
fsm.memory.append (al + ar)
|
||||
elif fsm.input_symbol == '-':
|
||||
fsm.memory.append (al - ar)
|
||||
elif fsm.input_symbol == '*':
|
||||
fsm.memory.append (al * ar)
|
||||
elif fsm.input_symbol == '/':
|
||||
fsm.memory.append (al / ar)
|
||||
|
||||
def DoEqual (fsm):
|
||||
print(str(fsm.memory.pop()))
|
||||
|
||||
def Error (fsm):
|
||||
print('That does not compute.')
|
||||
print(str(fsm.input_symbol))
|
||||
|
||||
def main():
|
||||
|
||||
'''This is where the example starts and the FSM state transitions are
|
||||
defined. Note that states are strings (such as 'INIT'). This is not
|
||||
necessary, but it makes the example easier to read. '''
|
||||
|
||||
f = FSM ('INIT', [])
|
||||
f.set_default_transition (Error, 'INIT')
|
||||
f.add_transition_any ('INIT', None, 'INIT')
|
||||
f.add_transition ('=', 'INIT', DoEqual, 'INIT')
|
||||
f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
|
||||
f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
|
||||
f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
|
||||
f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
|
||||
|
||||
print()
|
||||
print('Enter an RPN Expression.')
|
||||
print('Numbers may be integers. Operators are * / + -')
|
||||
print('Use the = sign to evaluate and print the expression.')
|
||||
print('For example: ')
|
||||
print(' 167 3 2 2 * * * 1 - =')
|
||||
inputstr = (input if PY3 else raw_input)('> ') # analysis:ignore
|
||||
f.process_list(inputstr)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
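Beyond the RPN demo above, the FSM API can be driven directly; a small sketch, assuming the vendored package is importable as pexpect:

from pexpect.FSM import FSM

def remember_vowel(fsm):
    fsm.memory.append(fsm.input_symbol)

f = FSM('START', memory=[])                      # memory doubles as the result list
f.add_transition_list('aeiou', 'START', remember_vowel, 'START')
f.add_transition_any('START', None, 'START')     # any other symbol: no action, stay in START
f.process_list('hello world')
print(f.memory)                                  # ['e', 'o', 'o']
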
20  plugins/git_clone/pexpect/LICENSE  Normal file
@@ -0,0 +1,20 @@
ISC LICENSE

This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt

Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <noah@noah.org>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
85  plugins/git_clone/pexpect/__init__.py  Normal file
@@ -0,0 +1,85 @@
|
||||
'''Pexpect is a Python module for spawning child applications and controlling
|
||||
them automatically. Pexpect can be used for automating interactive applications
|
||||
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
|
||||
scripts for duplicating software package installations on different servers. It
|
||||
can be used for automated software testing. Pexpect is in the spirit of Don
|
||||
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
|
||||
require TCL and Expect or require C extensions to be compiled. Pexpect does not
|
||||
use C, Expect, or TCL extensions. It should work on any platform that supports
|
||||
the standard Python pty module. The Pexpect interface focuses on ease of use so
|
||||
that simple tasks are easy.
|
||||
|
||||
There are two main interfaces to the Pexpect system; these are the function,
|
||||
run() and the class, spawn. The spawn class is more powerful. The run()
|
||||
function is simpler than spawn, and is good for quickly calling a program. When
|
||||
you call the run() function it executes a given program and then returns the
|
||||
output. This is a handy replacement for os.system().
|
||||
|
||||
For example::
|
||||
|
||||
pexpect.run('ls -la')
|
||||
|
||||
The spawn class is the more powerful interface to the Pexpect system. You can
|
||||
use this to spawn a child program then interact with it by sending input and
|
||||
expecting responses (waiting for patterns in the child's output).
|
||||
|
||||
For example::
|
||||
|
||||
child = pexpect.spawn('scp foo user@example.com:.')
|
||||
child.expect('Password:')
|
||||
child.sendline(mypassword)
|
||||
|
||||
This works even for commands that ask for passwords or other input outside of
|
||||
the normal stdio streams. For example, ssh reads input directly from the TTY
|
||||
device which bypasses stdin.
|
||||
|
||||
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
|
||||
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
|
||||
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
|
||||
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
|
||||
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
|
||||
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
|
||||
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
|
||||
|
||||
Pexpect is free, open source, and all that good stuff.
|
||||
http://pexpect.sourceforge.net/
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
import sys
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
|
||||
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
|
||||
from .utils import split_command_line, which, is_executable_file
|
||||
from .expect import Expecter, searcher_re, searcher_string
|
||||
|
||||
if sys.platform != 'win32':
|
||||
# On Unix, these are available at the top level for backwards compatibility
|
||||
from .pty_spawn import spawn, spawnu
|
||||
from .run import run, runu
|
||||
|
||||
__version__ = '4.8.0'
|
||||
__revision__ = ''
|
||||
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
|
||||
'which', 'split_command_line', '__version__', '__revision__']
|
||||
|
||||
|
||||
|
||||
# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
|
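A short sketch of the two interfaces described in the docstring above; it assumes this vendored copy matches the public pexpect 4.x API:

import pexpect

# run(): one-shot execution, optionally returning the exit status as well
output, status = pexpect.run('ls -la', withexitstatus=True)

# spawn(): interactive control; EOF and TIMEOUT can be matched instead of raised
child = pexpect.spawn('cat', encoding='utf-8', timeout=5)
child.sendline('ping')
index = child.expect(['ping', pexpect.EOF, pexpect.TIMEOUT])   # 0 on the echoed line
child.sendeof()
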
103  plugins/git_clone/pexpect/_async.py  Normal file
@@ -0,0 +1,103 @@
|
||||
import asyncio
|
||||
import errno
|
||||
import signal
|
||||
|
||||
from pexpect import EOF
|
||||
|
||||
@asyncio.coroutine
|
||||
def expect_async(expecter, timeout=None):
|
||||
# First process data that was previously read - if it matches, we don't need
|
||||
# async stuff.
|
||||
idx = expecter.existing_data()
|
||||
if idx is not None:
|
||||
return idx
|
||||
if not expecter.spawn.async_pw_transport:
|
||||
pw = PatternWaiter()
|
||||
pw.set_expecter(expecter)
|
||||
transport, pw = yield from asyncio.get_event_loop()\
|
||||
.connect_read_pipe(lambda: pw, expecter.spawn)
|
||||
expecter.spawn.async_pw_transport = pw, transport
|
||||
else:
|
||||
pw, transport = expecter.spawn.async_pw_transport
|
||||
pw.set_expecter(expecter)
|
||||
transport.resume_reading()
|
||||
try:
|
||||
return (yield from asyncio.wait_for(pw.fut, timeout))
|
||||
except asyncio.TimeoutError as e:
|
||||
transport.pause_reading()
|
||||
return expecter.timeout(e)
|
||||
|
||||
@asyncio.coroutine
|
||||
def repl_run_command_async(repl, cmdlines, timeout=-1):
|
||||
res = []
|
||||
repl.child.sendline(cmdlines[0])
|
||||
for line in cmdlines[1:]:
|
||||
yield from repl._expect_prompt(timeout=timeout, async_=True)
|
||||
res.append(repl.child.before)
|
||||
repl.child.sendline(line)
|
||||
|
||||
# Command was fully submitted, now wait for the next prompt
|
||||
prompt_idx = yield from repl._expect_prompt(timeout=timeout, async_=True)
|
||||
if prompt_idx == 1:
|
||||
# We got the continuation prompt - command was incomplete
|
||||
repl.child.kill(signal.SIGINT)
|
||||
yield from repl._expect_prompt(timeout=1, async_=True)
|
||||
raise ValueError("Continuation prompt found - input was incomplete:")
|
||||
return u''.join(res + [repl.child.before])
|
||||
|
||||
class PatternWaiter(asyncio.Protocol):
|
||||
transport = None
|
||||
|
||||
def set_expecter(self, expecter):
|
||||
self.expecter = expecter
|
||||
self.fut = asyncio.Future()
|
||||
|
||||
def found(self, result):
|
||||
if not self.fut.done():
|
||||
self.fut.set_result(result)
|
||||
self.transport.pause_reading()
|
||||
|
||||
def error(self, exc):
|
||||
if not self.fut.done():
|
||||
self.fut.set_exception(exc)
|
||||
self.transport.pause_reading()
|
||||
|
||||
def connection_made(self, transport):
|
||||
self.transport = transport
|
||||
|
||||
def data_received(self, data):
|
||||
spawn = self.expecter.spawn
|
||||
s = spawn._decoder.decode(data)
|
||||
spawn._log(s, 'read')
|
||||
|
||||
if self.fut.done():
|
||||
spawn._before.write(s)
|
||||
spawn._buffer.write(s)
|
||||
return
|
||||
|
||||
try:
|
||||
index = self.expecter.new_data(s)
|
||||
if index is not None:
|
||||
# Found a match
|
||||
self.found(index)
|
||||
except Exception as e:
|
||||
self.expecter.errored()
|
||||
self.error(e)
|
||||
|
||||
def eof_received(self):
|
||||
# N.B. If this gets called, async will close the pipe (the spawn object)
|
||||
# for us
|
||||
try:
|
||||
self.expecter.spawn.flag_eof = True
|
||||
index = self.expecter.eof()
|
||||
except EOF as e:
|
||||
self.error(e)
|
||||
else:
|
||||
self.found(index)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
if isinstance(exc, OSError) and exc.errno == errno.EIO:
|
||||
# We may get here without eof_received being called, e.g on Linux
|
||||
self.eof_received()
|
||||
elif exc is not None:
|
||||
self.error(exc)
|
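expect_async() above is normally reached through spawn.expect(..., async_=True); a minimal asyncio sketch, assuming Python 3.7+ on a POSIX host and the public pexpect API:

import asyncio
import pexpect

async def demo():
    child = pexpect.spawn('cat', encoding='utf-8', timeout=5)
    child.sendline('hello')
    await child.expect('hello', async_=True)     # routed through expect_async()
    child.sendeof()
    await child.expect(pexpect.EOF, async_=True)

asyncio.run(demo())
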
16  plugins/git_clone/pexpect/bashrc.sh  Normal file
@@ -0,0 +1,16 @@
# Different platforms have different names for the systemwide bashrc
if [[ -f /etc/bashrc ]]; then
    source /etc/bashrc
fi
if [[ -f /etc/bash.bashrc ]]; then
    source /etc/bash.bashrc
fi
if [[ -f ~/.bashrc ]]; then
    source ~/.bashrc
fi

# Reset PS1 so pexpect can find it
PS1="$"

# Unset PROMPT_COMMAND, so that it can't change PS1 to something unexpected.
unset PROMPT_COMMAND
35  plugins/git_clone/pexpect/exceptions.py  Normal file
@@ -0,0 +1,35 @@
"""Exception classes used by Pexpect"""

import traceback
import sys

class ExceptionPexpect(Exception):
    '''Base class for all exceptions raised by this module.
    '''

    def __init__(self, value):
        super(ExceptionPexpect, self).__init__(value)
        self.value = value

    def __str__(self):
        return str(self.value)

    def get_trace(self):
        '''This returns an abbreviated stack trace with lines that only concern
        the caller. In other words, the stack trace inside the Pexpect module
        is not included. '''

        tblist = traceback.extract_tb(sys.exc_info()[2])
        tblist = [item for item in tblist if ('pexpect/__init__' not in item[0])
                  and ('pexpect/expect' not in item[0])]
        tblist = traceback.format_list(tblist)
        return ''.join(tblist)


class EOF(ExceptionPexpect):
    '''Raised when EOF is read from a child.
    This usually means the child has exited.'''


class TIMEOUT(ExceptionPexpect):
    '''Raised when a read time exceeds the timeout. '''
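EOF and TIMEOUT double as expect() patterns, so callers can either catch them or match them; a brief sketch, assuming the public pexpect API:

import pexpect

child = pexpect.spawn('sleep 10', timeout=1)
try:
    child.expect('never appears')                         # raises pexpect.TIMEOUT after 1s
except pexpect.TIMEOUT:
    print('timed out, as expected')

child = pexpect.spawn('sleep 10', timeout=1)
index = child.expect(['never appears', pexpect.TIMEOUT])  # returns 1 instead of raising
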
371  plugins/git_clone/pexpect/expect.py  Normal file
@@ -0,0 +1,371 @@
|
||||
import time
|
||||
|
||||
from .exceptions import EOF, TIMEOUT
|
||||
|
||||
class Expecter(object):
|
||||
def __init__(self, spawn, searcher, searchwindowsize=-1):
|
||||
self.spawn = spawn
|
||||
self.searcher = searcher
|
||||
# A value of -1 means to use the figure from spawn, which should
|
||||
# be None or a positive number.
|
||||
if searchwindowsize == -1:
|
||||
searchwindowsize = spawn.searchwindowsize
|
||||
self.searchwindowsize = searchwindowsize
|
||||
self.lookback = None
|
||||
if hasattr(searcher, 'longest_string'):
|
||||
self.lookback = searcher.longest_string
|
||||
|
||||
def do_search(self, window, freshlen):
|
||||
spawn = self.spawn
|
||||
searcher = self.searcher
|
||||
if freshlen > len(window):
|
||||
freshlen = len(window)
|
||||
index = searcher.search(window, freshlen, self.searchwindowsize)
|
||||
if index >= 0:
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
spawn._buffer.write(window[searcher.end:])
|
||||
spawn.before = spawn._before.getvalue()[
|
||||
0:-(len(window) - searcher.start)]
|
||||
spawn._before = spawn.buffer_type()
|
||||
spawn._before.write(window[searcher.end:])
|
||||
spawn.after = window[searcher.start:searcher.end]
|
||||
spawn.match = searcher.match
|
||||
spawn.match_index = index
|
||||
# Found a match
|
||||
return index
|
||||
elif self.searchwindowsize or self.lookback:
|
||||
maintain = self.searchwindowsize or self.lookback
|
||||
if spawn._buffer.tell() > maintain:
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
spawn._buffer.write(window[-maintain:])
|
||||
|
||||
def existing_data(self):
|
||||
# First call from a new call to expect_loop or expect_async.
|
||||
# self.searchwindowsize may have changed.
|
||||
# Treat all data as fresh.
|
||||
spawn = self.spawn
|
||||
before_len = spawn._before.tell()
|
||||
buf_len = spawn._buffer.tell()
|
||||
freshlen = before_len
|
||||
if before_len > buf_len:
|
||||
if not self.searchwindowsize:
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
window = spawn._before.getvalue()
|
||||
spawn._buffer.write(window)
|
||||
elif buf_len < self.searchwindowsize:
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
spawn._before.seek(
|
||||
max(0, before_len - self.searchwindowsize))
|
||||
window = spawn._before.read()
|
||||
spawn._buffer.write(window)
|
||||
else:
|
||||
spawn._buffer.seek(max(0, buf_len - self.searchwindowsize))
|
||||
window = spawn._buffer.read()
|
||||
else:
|
||||
if self.searchwindowsize:
|
||||
spawn._buffer.seek(max(0, buf_len - self.searchwindowsize))
|
||||
window = spawn._buffer.read()
|
||||
else:
|
||||
window = spawn._buffer.getvalue()
|
||||
return self.do_search(window, freshlen)
|
||||
|
||||
def new_data(self, data):
|
||||
# A subsequent call, after a call to existing_data.
|
||||
spawn = self.spawn
|
||||
freshlen = len(data)
|
||||
spawn._before.write(data)
|
||||
if not self.searchwindowsize:
|
||||
if self.lookback:
|
||||
# search lookback + new data.
|
||||
old_len = spawn._buffer.tell()
|
||||
spawn._buffer.write(data)
|
||||
spawn._buffer.seek(max(0, old_len - self.lookback))
|
||||
window = spawn._buffer.read()
|
||||
else:
|
||||
# copy the whole buffer (really slow for large datasets).
|
||||
spawn._buffer.write(data)
|
||||
window = spawn.buffer
|
||||
else:
|
||||
if len(data) >= self.searchwindowsize or not spawn._buffer.tell():
|
||||
window = data[-self.searchwindowsize:]
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
spawn._buffer.write(window[-self.searchwindowsize:])
|
||||
else:
|
||||
spawn._buffer.write(data)
|
||||
new_len = spawn._buffer.tell()
|
||||
spawn._buffer.seek(max(0, new_len - self.searchwindowsize))
|
||||
window = spawn._buffer.read()
|
||||
return self.do_search(window, freshlen)
|
||||
|
||||
def eof(self, err=None):
|
||||
spawn = self.spawn
|
||||
|
||||
spawn.before = spawn._before.getvalue()
|
||||
spawn._buffer = spawn.buffer_type()
|
||||
spawn._before = spawn.buffer_type()
|
||||
spawn.after = EOF
|
||||
index = self.searcher.eof_index
|
||||
if index >= 0:
|
||||
spawn.match = EOF
|
||||
spawn.match_index = index
|
||||
return index
|
||||
else:
|
||||
spawn.match = None
|
||||
spawn.match_index = None
|
||||
msg = str(spawn)
|
||||
msg += '\nsearcher: %s' % self.searcher
|
||||
if err is not None:
|
||||
msg = str(err) + '\n' + msg
|
||||
|
||||
exc = EOF(msg)
|
||||
exc.__cause__ = None # in Python 3.x we can use "raise exc from None"
|
||||
raise exc
|
||||
|
||||
def timeout(self, err=None):
|
||||
spawn = self.spawn
|
||||
|
||||
spawn.before = spawn._before.getvalue()
|
||||
spawn.after = TIMEOUT
|
||||
index = self.searcher.timeout_index
|
||||
if index >= 0:
|
||||
spawn.match = TIMEOUT
|
||||
spawn.match_index = index
|
||||
return index
|
||||
else:
|
||||
spawn.match = None
|
||||
spawn.match_index = None
|
||||
msg = str(spawn)
|
||||
msg += '\nsearcher: %s' % self.searcher
|
||||
if err is not None:
|
||||
msg = str(err) + '\n' + msg
|
||||
|
||||
exc = TIMEOUT(msg)
|
||||
exc.__cause__ = None # in Python 3.x we can use "raise exc from None"
|
||||
raise exc
|
||||
|
||||
def errored(self):
|
||||
spawn = self.spawn
|
||||
spawn.before = spawn._before.getvalue()
|
||||
spawn.after = None
|
||||
spawn.match = None
|
||||
spawn.match_index = None
|
||||
|
||||
def expect_loop(self, timeout=-1):
|
||||
"""Blocking expect"""
|
||||
spawn = self.spawn
|
||||
|
||||
if timeout is not None:
|
||||
end_time = time.time() + timeout
|
||||
|
||||
try:
|
||||
idx = self.existing_data()
|
||||
if idx is not None:
|
||||
return idx
|
||||
while True:
|
||||
# No match at this point
|
||||
if (timeout is not None) and (timeout < 0):
|
||||
return self.timeout()
|
||||
# Still have time left, so read more data
|
||||
incoming = spawn.read_nonblocking(spawn.maxread, timeout)
|
||||
if self.spawn.delayafterread is not None:
|
||||
time.sleep(self.spawn.delayafterread)
|
||||
idx = self.new_data(incoming)
|
||||
# Keep reading until exception or return.
|
||||
if idx is not None:
|
||||
return idx
|
||||
if timeout is not None:
|
||||
timeout = end_time - time.time()
|
||||
except EOF as e:
|
||||
return self.eof(e)
|
||||
except TIMEOUT as e:
|
||||
return self.timeout(e)
|
||||
except:
|
||||
self.errored()
|
||||
raise
|
||||
|
||||
|
||||
class searcher_string(object):
|
||||
'''This is a plain string search helper for the spawn.expect_any() method.
|
||||
This helper class is for speed. For more powerful regex patterns
|
||||
see the helper class, searcher_re.
|
||||
|
||||
Attributes:
|
||||
|
||||
eof_index - index of EOF, or -1
|
||||
timeout_index - index of TIMEOUT, or -1
|
||||
|
||||
After a successful match by the search() method the following attributes
|
||||
are available:
|
||||
|
||||
start - index into the buffer, first byte of match
|
||||
end - index into the buffer, first byte after match
|
||||
match - the matching string itself
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, strings):
|
||||
'''This creates an instance of searcher_string. This argument 'strings'
|
||||
may be a list; a sequence of strings; or the EOF or TIMEOUT types. '''
|
||||
|
||||
self.eof_index = -1
|
||||
self.timeout_index = -1
|
||||
self._strings = []
|
||||
self.longest_string = 0
|
||||
for n, s in enumerate(strings):
|
||||
if s is EOF:
|
||||
self.eof_index = n
|
||||
continue
|
||||
if s is TIMEOUT:
|
||||
self.timeout_index = n
|
||||
continue
|
||||
self._strings.append((n, s))
|
||||
if len(s) > self.longest_string:
|
||||
self.longest_string = len(s)
|
||||
|
||||
def __str__(self):
|
||||
'''This returns a human-readable string that represents the state of
|
||||
the object.'''
|
||||
|
||||
ss = [(ns[0], ' %d: %r' % ns) for ns in self._strings]
|
||||
ss.append((-1, 'searcher_string:'))
|
||||
if self.eof_index >= 0:
|
||||
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
|
||||
if self.timeout_index >= 0:
|
||||
ss.append((self.timeout_index,
|
||||
' %d: TIMEOUT' % self.timeout_index))
|
||||
ss.sort()
|
||||
ss = list(zip(*ss))[1]
|
||||
return '\n'.join(ss)
|
||||
|
||||
def search(self, buffer, freshlen, searchwindowsize=None):
|
||||
'''This searches 'buffer' for the first occurrence of one of the search
|
||||
strings. 'freshlen' must indicate the number of bytes at the end of
|
||||
'buffer' which have not been searched before. It helps to avoid
|
||||
searching the same, possibly big, buffer over and over again.
|
||||
|
||||
See class spawn for the 'searchwindowsize' argument.
|
||||
|
||||
If there is a match this returns the index of that string, and sets
|
||||
'start', 'end' and 'match'. Otherwise, this returns -1. '''
|
||||
|
||||
first_match = None
|
||||
|
||||
# 'freshlen' helps a lot here. Further optimizations could
|
||||
# possibly include:
|
||||
#
|
||||
# using something like the Boyer-Moore Fast String Searching
|
||||
# Algorithm; pre-compiling the search through a list of
|
||||
# strings into something that can scan the input once to
|
||||
# search for all N strings; realize that if we search for
|
||||
# ['bar', 'baz'] and the input is '...foo' we need not bother
|
||||
# rescanning until we've read three more bytes.
|
||||
#
|
||||
# Sadly, I don't know enough about this interesting topic. /grahn
|
||||
|
||||
for index, s in self._strings:
|
||||
if searchwindowsize is None:
|
||||
# the match, if any, can only be in the fresh data,
|
||||
# or at the very end of the old data
|
||||
offset = -(freshlen + len(s))
|
||||
else:
|
||||
# better obey searchwindowsize
|
||||
offset = -searchwindowsize
|
||||
n = buffer.find(s, offset)
|
||||
if n >= 0 and (first_match is None or n < first_match):
|
||||
first_match = n
|
||||
best_index, best_match = index, s
|
||||
if first_match is None:
|
||||
return -1
|
||||
self.match = best_match
|
||||
self.start = first_match
|
||||
self.end = self.start + len(self.match)
|
||||
return best_index
|
||||
|
||||
|
||||
class searcher_re(object):
|
||||
'''This is a regular expression string search helper for the
|
||||
spawn.expect_any() method. This helper class is for powerful
|
||||
pattern matching. For speed, see the helper class, searcher_string.
|
||||
|
||||
Attributes:
|
||||
|
||||
eof_index - index of EOF, or -1
|
||||
timeout_index - index of TIMEOUT, or -1
|
||||
|
||||
After a successful match by the search() method the following attributes
|
||||
are available:
|
||||
|
||||
start - index into the buffer, first byte of match
|
||||
end - index into the buffer, first byte after match
|
||||
match - the re.match object returned by a successful re.search
|
||||
|
||||
'''
|
||||
|
||||
def __init__(self, patterns):
|
||||
'''This creates an instance that searches for 'patterns' Where
|
||||
'patterns' may be a list or other sequence of compiled regular
|
||||
expressions, or the EOF or TIMEOUT types.'''
|
||||
|
||||
self.eof_index = -1
|
||||
self.timeout_index = -1
|
||||
self._searches = []
|
||||
for n, s in enumerate(patterns):
|
||||
if s is EOF:
|
||||
self.eof_index = n
|
||||
continue
|
||||
if s is TIMEOUT:
|
||||
self.timeout_index = n
|
||||
continue
|
||||
self._searches.append((n, s))
|
||||
|
||||
def __str__(self):
|
||||
'''This returns a human-readable string that represents the state of
|
||||
the object.'''
|
||||
|
||||
#ss = [(n, ' %d: re.compile("%s")' %
|
||||
# (n, repr(s.pattern))) for n, s in self._searches]
|
||||
ss = list()
|
||||
for n, s in self._searches:
|
||||
ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
|
||||
ss.append((-1, 'searcher_re:'))
|
||||
if self.eof_index >= 0:
|
||||
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
|
||||
if self.timeout_index >= 0:
|
||||
ss.append((self.timeout_index, ' %d: TIMEOUT' %
|
||||
self.timeout_index))
|
||||
ss.sort()
|
||||
ss = list(zip(*ss))[1]
|
||||
return '\n'.join(ss)
|
||||
|
||||
def search(self, buffer, freshlen, searchwindowsize=None):
|
||||
'''This searches 'buffer' for the first occurrence of one of the regular
|
||||
expressions. 'freshlen' must indicate the number of bytes at the end of
|
||||
'buffer' which have not been searched before.
|
||||
|
||||
See class spawn for the 'searchwindowsize' argument.
|
||||
|
||||
If there is a match this returns the index of that string, and sets
|
||||
'start', 'end' and 'match'. Otherwise, returns -1.'''
|
||||
|
||||
first_match = None
|
||||
# 'freshlen' doesn't help here -- we cannot predict the
|
||||
# length of a match, and the re module provides no help.
|
||||
if searchwindowsize is None:
|
||||
searchstart = 0
|
||||
else:
|
||||
searchstart = max(0, len(buffer) - searchwindowsize)
|
||||
for index, s in self._searches:
|
||||
match = s.search(buffer, searchstart)
|
||||
if match is None:
|
||||
continue
|
||||
n = match.start()
|
||||
if first_match is None or n < first_match:
|
||||
first_match = n
|
||||
the_match = match
|
||||
best_index = index
|
||||
if first_match is None:
|
||||
return -1
|
||||
self.start = first_match
|
||||
self.match = the_match
|
||||
self.end = self.match.end()
|
||||
return best_index
|
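The searcher classes are usable on their own; a small sketch of searcher_string with hypothetical buffer contents, assuming the package imports as pexpect:

from pexpect import EOF
from pexpect.expect import searcher_string

s = searcher_string(['ERROR', 'OK', EOF])
buf = 'step 1 ... OK\n'
index = s.search(buf, freshlen=len(buf))    # -> 1, the list position of 'OK'
print(index, buf[s.start:s.end])            # 1 OK
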
148  plugins/git_clone/pexpect/fdpexpect.py  Normal file
@@ -0,0 +1,148 @@
|
||||
'''This is like pexpect, but it will work with any file descriptor that you
|
||||
pass it. You are responsible for opening and closing the file descriptor.
|
||||
This allows you to use Pexpect with sockets and named pipes (FIFOs).
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
from .spawnbase import SpawnBase
|
||||
from .exceptions import ExceptionPexpect, TIMEOUT
|
||||
from .utils import select_ignore_interrupts, poll_ignore_interrupts
|
||||
import os
|
||||
|
||||
__all__ = ['fdspawn']
|
||||
|
||||
class fdspawn(SpawnBase):
|
||||
'''This is like pexpect.spawn but allows you to supply your own open file
|
||||
descriptor. For example, you could use it to read through a file looking
|
||||
for patterns, or to control a modem or serial device. '''
|
||||
|
||||
def __init__ (self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
|
||||
logfile=None, encoding=None, codec_errors='strict', use_poll=False):
|
||||
'''This takes a file descriptor (an int) or an object that supports the
|
||||
fileno() method (returning an int). All Python file-like objects
|
||||
support fileno(). '''
|
||||
|
||||
if type(fd) != type(0) and hasattr(fd, 'fileno'):
|
||||
fd = fd.fileno()
|
||||
|
||||
if type(fd) != type(0):
|
||||
raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')
|
||||
|
||||
try: # make sure fd is a valid file descriptor
|
||||
os.fstat(fd)
|
||||
except OSError:
|
||||
raise ExceptionPexpect('The fd argument is not a valid file descriptor.')
|
||||
|
||||
self.args = None
|
||||
self.command = None
|
||||
SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
|
||||
encoding=encoding, codec_errors=codec_errors)
|
||||
self.child_fd = fd
|
||||
self.own_fd = False
|
||||
self.closed = False
|
||||
self.name = '<file descriptor %d>' % fd
|
||||
self.use_poll = use_poll
|
||||
|
||||
def close (self):
|
||||
"""Close the file descriptor.
|
||||
|
||||
Calling this method a second time does nothing, but if the file
|
||||
descriptor was closed elsewhere, :class:`OSError` will be raised.
|
||||
"""
|
||||
if self.child_fd == -1:
|
||||
return
|
||||
|
||||
self.flush()
|
||||
os.close(self.child_fd)
|
||||
self.child_fd = -1
|
||||
self.closed = True
|
||||
|
||||
def isalive (self):
|
||||
'''This checks if the file descriptor is still valid. If :func:`os.fstat`
|
||||
does not raise an exception then we assume it is alive. '''
|
||||
|
||||
if self.child_fd == -1:
|
||||
return False
|
||||
try:
|
||||
os.fstat(self.child_fd)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def terminate (self, force=False): # pragma: no cover
|
||||
'''Deprecated and invalid. Just raises an exception.'''
|
||||
raise ExceptionPexpect('This method is not valid for file descriptors.')
|
||||
|
||||
# These four methods are left around for backwards compatibility, but not
|
||||
# documented as part of fdpexpect. You're encouraged to use os.write
|
||||
# directly.
|
||||
def send(self, s):
|
||||
"Write to fd, return number of bytes written"
|
||||
s = self._coerce_send_string(s)
|
||||
self._log(s, 'send')
|
||||
|
||||
b = self._encoder.encode(s, final=False)
|
||||
return os.write(self.child_fd, b)
|
||||
|
||||
def sendline(self, s):
|
||||
"Write to fd with trailing newline, return number of bytes written"
|
||||
s = self._coerce_send_string(s)
|
||||
return self.send(s + self.linesep)
|
||||
|
||||
def write(self, s):
|
||||
"Write to fd, return None"
|
||||
self.send(s)
|
||||
|
||||
def writelines(self, sequence):
|
||||
"Call self.write() for each item in sequence"
|
||||
for s in sequence:
|
||||
self.write(s)
|
||||
|
||||
def read_nonblocking(self, size=1, timeout=-1):
|
||||
"""
|
||||
Read from the file descriptor and return the result as a string.
|
||||
|
||||
The read_nonblocking method of :class:`SpawnBase` assumes that a call
|
||||
to os.read will not block (timeout parameter is ignored). This is not
|
||||
the case for POSIX file-like objects such as sockets and serial ports.
|
||||
|
||||
Use :func:`select.select`, timeout is implemented conditionally for
|
||||
POSIX systems.
|
||||
|
||||
:param int size: Read at most *size* bytes.
|
||||
:param int timeout: Wait timeout seconds for file descriptor to be
|
||||
ready to read. When -1 (default), use self.timeout. When 0, poll.
|
||||
:return: String containing the bytes read
|
||||
"""
|
||||
if os.name == 'posix':
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
rlist = [self.child_fd]
|
||||
wlist = []
|
||||
xlist = []
|
||||
if self.use_poll:
|
||||
rlist = poll_ignore_interrupts(rlist, timeout)
|
||||
else:
|
||||
rlist, wlist, xlist = select_ignore_interrupts(
|
||||
rlist, wlist, xlist, timeout
|
||||
)
|
||||
if self.child_fd not in rlist:
|
||||
raise TIMEOUT('Timeout exceeded.')
|
||||
return super(fdspawn, self).read_nonblocking(size)
|
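A minimal usage sketch for fdspawn, assuming this vendored tree is importable as the usual pexpect package; the file path and pattern are illustrative only:

import os
from pexpect import fdpexpect

# Any object with a real OS-level descriptor works: files, sockets, serial ports.
fd = os.open('/var/log/syslog', os.O_RDONLY)   # illustrative path
child = fdpexpect.fdspawn(fd, timeout=5)
try:
    child.expect('error')       # raises pexpect.TIMEOUT or pexpect.EOF if never seen
    print('matched:', child.after)
finally:
    child.close()               # close() also closes the underlying descriptor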
188
plugins/git_clone/pexpect/popen_spawn.py
Normal file
@ -0,0 +1,188 @@
"""Provides an interface like pexpect.spawn using subprocess.Popen
"""
import os
import threading
import subprocess
import sys
import time
import signal
import shlex

try:
    from queue import Queue, Empty  # Python 3
except ImportError:
    from Queue import Queue, Empty  # Python 2

from .spawnbase import SpawnBase, PY3
from .exceptions import EOF
from .utils import string_types


class PopenSpawn(SpawnBase):
    def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, cwd=None, env=None, encoding=None,
                 codec_errors='strict', preexec_fn=None):
        super(PopenSpawn, self).__init__(timeout=timeout, maxread=maxread,
                searchwindowsize=searchwindowsize, logfile=logfile,
                encoding=encoding, codec_errors=codec_errors)

        # Note that `SpawnBase` initializes `self.crlf` to `\r\n`
        # because the default behaviour for a PTY is to convert
        # incoming LF to `\r\n` (see the `onlcr` flag and
        # https://stackoverflow.com/a/35887657/5397009). Here we set
        # it to `os.linesep` because that is what the spawned
        # application outputs by default and `popen` doesn't translate
        # anything.
        if encoding is None:
            self.crlf = os.linesep.encode("ascii")
        else:
            self.crlf = self.string_type(os.linesep)

        kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
                      stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
                      cwd=cwd, preexec_fn=preexec_fn, env=env)

        if sys.platform == 'win32':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            kwargs['startupinfo'] = startupinfo
            kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

        if isinstance(cmd, string_types) and sys.platform != 'win32':
            cmd = shlex.split(cmd, posix=os.name == 'posix')

        self.proc = subprocess.Popen(cmd, **kwargs)
        self.pid = self.proc.pid
        self.closed = False
        self._buf = self.string_type()

        self._read_queue = Queue()
        self._read_thread = threading.Thread(target=self._read_incoming)
        self._read_thread.setDaemon(True)
        self._read_thread.start()

    _read_reached_eof = False

    def read_nonblocking(self, size, timeout):
        buf = self._buf
        if self._read_reached_eof:
            # We have already finished reading. Use up any buffered data,
            # then raise EOF
            if buf:
                self._buf = buf[size:]
                return buf[:size]
            else:
                self.flag_eof = True
                raise EOF('End Of File (EOF).')

        if timeout == -1:
            timeout = self.timeout
        elif timeout is None:
            timeout = 1e6

        t0 = time.time()
        while (time.time() - t0) < timeout and size and len(buf) < size:
            try:
                incoming = self._read_queue.get_nowait()
            except Empty:
                break
            else:
                if incoming is None:
                    self._read_reached_eof = True
                    break

                buf += self._decoder.decode(incoming, final=False)

        r, self._buf = buf[:size], buf[size:]

        self._log(r, 'read')
        return r

    def _read_incoming(self):
        """Run in a thread to move output from a pipe to a queue."""
        fileno = self.proc.stdout.fileno()
        while 1:
            buf = b''
            try:
                buf = os.read(fileno, 1024)
            except OSError as e:
                self._log(e, 'read')

            if not buf:
                # This indicates we have reached EOF
                self._read_queue.put(None)
                return

            self._read_queue.put(buf)

    def write(self, s):
        '''This is similar to send() except that there is no return value.
        '''
        self.send(s)

    def writelines(self, sequence):
        '''This calls write() for each element in the sequence.

        The sequence can be any iterable object producing strings, typically a
        list of strings. This does not add line separators. There is no return
        value.
        '''
        for s in sequence:
            self.send(s)

    def send(self, s):
        '''Send data to the subprocess' stdin.

        Returns the number of bytes written.
        '''
        s = self._coerce_send_string(s)
        self._log(s, 'send')

        b = self._encoder.encode(s, final=False)
        if PY3:
            return self.proc.stdin.write(b)
        else:
            # On Python 2, .write() returns None, so we return the length of
            # bytes written ourselves. This assumes they all got written.
            self.proc.stdin.write(b)
            return len(b)

    def sendline(self, s=''):
        '''Wraps send(), sending string ``s`` to child process, with os.linesep
        automatically appended. Returns number of bytes written. '''

        n = self.send(s)
        return n + self.send(self.linesep)

    def wait(self):
        '''Wait for the subprocess to finish.

        Returns the exit code.
        '''
        status = self.proc.wait()
        if status >= 0:
            self.exitstatus = status
            self.signalstatus = None
        else:
            self.exitstatus = None
            self.signalstatus = -status
        self.terminated = True
        return status

    def kill(self, sig):
        '''Sends a Unix signal to the subprocess.

        Use constants from the :mod:`signal` module to specify which signal.
        '''
        if sys.platform == 'win32':
            if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
                sig = signal.CTRL_C_EVENT
            elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
                sig = signal.CTRL_BREAK_EVENT
            else:
                sig = signal.SIGTERM

        os.kill(self.proc.pid, sig)

    def sendeof(self):
        '''Closes the stdin pipe from the writing end.'''
        self.proc.stdin.close()
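A short usage sketch for PopenSpawn, assuming a POSIX system with 'cat' on PATH; because no pty is involved, programs that detect a terminal may buffer their output differently than under spawn:

from pexpect.popen_spawn import PopenSpawn

child = PopenSpawn('cat', timeout=5, encoding='utf-8')
child.sendline('hello world')   # written to cat's stdin pipe
child.expect('hello world')     # matched against the merged stdout/stderr
child.sendeof()                 # close stdin so cat exits
child.wait()                    # returns the exit code (0 here)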
860
plugins/git_clone/pexpect/pty_spawn.py
Normal file
@ -0,0 +1,860 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import pty
|
||||
import tty
|
||||
import errno
|
||||
import signal
|
||||
from contextlib import contextmanager
|
||||
|
||||
from .ptyprocess import ptyprocess
|
||||
from .ptyprocess.ptyprocess import use_native_pty_fork
|
||||
|
||||
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
|
||||
from .spawnbase import SpawnBase
|
||||
from .utils import (
|
||||
which, split_command_line, select_ignore_interrupts, poll_ignore_interrupts
|
||||
)
|
||||
|
||||
@contextmanager
|
||||
def _wrap_ptyprocess_err():
|
||||
"""Turn ptyprocess errors into our own ExceptionPexpect errors"""
|
||||
try:
|
||||
yield
|
||||
except ptyprocess.PtyProcessError as e:
|
||||
raise ExceptionPexpect(*e.args)
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
|
||||
class spawn(SpawnBase):
|
||||
'''This is the main class interface for Pexpect. Use this class to start
|
||||
and control child applications. '''
|
||||
|
||||
# This is purely informational now - changing it has no effect
|
||||
use_native_pty_fork = use_native_pty_fork
|
||||
|
||||
def __init__(self, command, args=[], timeout=30, maxread=2000,
|
||||
searchwindowsize=None, logfile=None, cwd=None, env=None,
|
||||
ignore_sighup=False, echo=True, preexec_fn=None,
|
||||
encoding=None, codec_errors='strict', dimensions=None,
|
||||
use_poll=False):
|
||||
'''This is the constructor. The command parameter may be a string that
|
||||
includes a command and any arguments to the command. For example::
|
||||
|
||||
child = pexpect.spawn('/usr/bin/ftp')
|
||||
child = pexpect.spawn('/usr/bin/ssh user@example.com')
|
||||
child = pexpect.spawn('ls -latr /tmp')
|
||||
|
||||
You may also construct it with a list of arguments like so::
|
||||
|
||||
child = pexpect.spawn('/usr/bin/ftp', [])
|
||||
child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
|
||||
child = pexpect.spawn('ls', ['-latr', '/tmp'])
|
||||
|
||||
After this the child application will be created and will be ready to
|
||||
talk to. For normal use, see expect() and send() and sendline().
|
||||
|
||||
Remember that Pexpect does NOT interpret shell meta characters such as
|
||||
redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
|
||||
common mistake. If you want to run a command and pipe it through
|
||||
another command then you must also start a shell. For example::
|
||||
|
||||
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
The second form of spawn (where you pass a list of arguments) is useful
|
||||
in situations where you wish to spawn a command and pass it its own
|
||||
argument list. This can make syntax more clear. For example, the
|
||||
following is equivalent to the previous example::
|
||||
|
||||
shell_cmd = 'ls -l | grep LOG > logs.txt'
|
||||
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
The maxread attribute sets the read buffer size. This is maximum number
|
||||
of bytes that Pexpect will try to read from a TTY at one time. Setting
|
||||
the maxread size to 1 will turn off buffering. Setting the maxread
|
||||
value higher may help performance in cases where large amounts of
|
||||
output are read back from the child. This feature is useful in
|
||||
conjunction with searchwindowsize.
|
||||
|
||||
When the keyword argument *searchwindowsize* is None (default), the
|
||||
full buffer is searched at each iteration of receiving incoming data.
|
||||
The default number of bytes scanned at each iteration is very large
|
||||
and may be reduced to collaterally reduce search cost. After
|
||||
:meth:`~.expect` returns, the full buffer attribute remains up to
|
||||
size *maxread* irrespective of *searchwindowsize* value.
|
||||
|
||||
When the keyword argument ``timeout`` is specified as a number,
|
||||
(default: *30*), then :class:`TIMEOUT` will be raised after the value
|
||||
specified has elapsed, in seconds, for any of the :meth:`~.expect`
|
||||
family of method calls. When None, TIMEOUT will not be raised, and
|
||||
:meth:`~.expect` may block indefinitely until match.
|
||||
|
||||
|
||||
The logfile member turns on or off logging. All input and output will
|
||||
be copied to the given file object. Set logfile to None to stop
|
||||
logging. This is the default. Set logfile to sys.stdout to echo
|
||||
everything to standard output. The logfile is flushed after each write.
|
||||
|
||||
Example log input and output to a file::
|
||||
|
||||
child = pexpect.spawn('some_command')
|
||||
fout = open('mylog.txt','wb')
|
||||
child.logfile = fout
|
||||
|
||||
Example log to stdout::
|
||||
|
||||
# In Python 2:
|
||||
child = pexpect.spawn('some_command')
|
||||
child.logfile = sys.stdout
|
||||
|
||||
# In Python 3, we'll use the ``encoding`` argument to decode data
|
||||
# from the subprocess and handle it as unicode:
|
||||
child = pexpect.spawn('some_command', encoding='utf-8')
|
||||
child.logfile = sys.stdout
|
||||
|
||||
The logfile_read and logfile_send members can be used to separately log
|
||||
the input from the child and output sent to the child. Sometimes you
|
||||
don't want to see everything you write to the child. You only want to
|
||||
log what the child sends back. For example::
|
||||
|
||||
child = pexpect.spawn('some_command')
|
||||
child.logfile_read = sys.stdout
|
||||
|
||||
You will need to pass an encoding to spawn in the above code if you are
|
||||
using Python 3.
|
||||
|
||||
To separately log output sent to the child use logfile_send::
|
||||
|
||||
child.logfile_send = fout
|
||||
|
||||
If ``ignore_sighup`` is True, the child process will ignore SIGHUP
|
||||
signals. The default is False from Pexpect 4.0, meaning that SIGHUP
|
||||
will be handled normally by the child.
|
||||
|
||||
The delaybeforesend helps overcome a weird behavior that many users
|
||||
were experiencing. The typical problem was that a user would expect() a
|
||||
"Password:" prompt and then immediately call sendline() to send the
|
||||
password. The user would then see that their password was echoed back
|
||||
to them. Passwords don't normally echo. The problem is caused by the
|
||||
fact that most applications print out the "Password" prompt and then
|
||||
turn off stdin echo, but if you send your password before the
|
||||
application turned off echo, then you get your password echoed.
|
||||
Normally this wouldn't be a problem when interacting with a human at a
|
||||
real keyboard. If you introduce a slight delay just before writing then
|
||||
this seems to clear up the problem. This was such a common problem for
|
||||
many users that I decided that the default pexpect behavior should be
|
||||
to sleep just before writing to the child application. 1/20th of a
|
||||
second (50 ms) seems to be enough to clear up the problem. You can set
|
||||
delaybeforesend to None to return to the old behavior.
|
||||
|
||||
Note that spawn is clever about finding commands on your path.
|
||||
It uses the same logic that "which" uses to find executables.
|
||||
|
||||
If you wish to get the exit status of the child you must call the
|
||||
close() method. The exit or signal status of the child will be stored
|
||||
in self.exitstatus or self.signalstatus. If the child exited normally
|
||||
then exitstatus will store the exit return code and signalstatus will
|
||||
be None. If the child was terminated abnormally with a signal then
|
||||
signalstatus will store the signal value and exitstatus will be None::
|
||||
|
||||
child = pexpect.spawn('some_command')
|
||||
child.close()
|
||||
print(child.exitstatus, child.signalstatus)
|
||||
|
||||
If you need more detail you can also read the self.status member which
|
||||
stores the status returned by os.waitpid. You can interpret this using
|
||||
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG.
|
||||
|
||||
The echo attribute may be set to False to disable echoing of input.
|
||||
As a pseudo-terminal, all input echoed by the "keyboard" (send()
|
||||
or sendline()) will be repeated to output. For many cases, it is
|
||||
not desirable to have echo enabled, and it may be later disabled
|
||||
using setecho(False) followed by waitnoecho(). However, for some
|
||||
platforms such as Solaris, this is not possible, and should be
|
||||
disabled immediately on spawn.
|
||||
|
||||
If preexec_fn is given, it will be called in the child process before
|
||||
launching the given command. This is useful to e.g. reset inherited
|
||||
signal handlers.
|
||||
|
||||
The dimensions attribute specifies the size of the pseudo-terminal as
|
||||
seen by the subprocess, and is specified as a two-entry tuple (rows,
|
||||
columns). If this is unspecified, the defaults in ptyprocess will apply.
|
||||
|
||||
The use_poll attribute enables using select.poll() over select.select()
|
||||
for socket handling. This is handy if your system could have > 1024 fds
|
||||
'''
|
||||
super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize,
|
||||
logfile=logfile, encoding=encoding, codec_errors=codec_errors)
|
||||
self.STDIN_FILENO = pty.STDIN_FILENO
|
||||
self.STDOUT_FILENO = pty.STDOUT_FILENO
|
||||
self.STDERR_FILENO = pty.STDERR_FILENO
|
||||
self.str_last_chars = 100
|
||||
self.cwd = cwd
|
||||
self.env = env
|
||||
self.echo = echo
|
||||
self.ignore_sighup = ignore_sighup
|
||||
self.__irix_hack = sys.platform.lower().startswith('irix')
|
||||
if command is None:
|
||||
self.command = None
|
||||
self.args = None
|
||||
self.name = '<pexpect factory incomplete>'
|
||||
else:
|
||||
self._spawn(command, args, preexec_fn, dimensions)
|
||||
self.use_poll = use_poll
|
||||
|
||||
def __str__(self):
|
||||
'''This returns a human-readable string that represents the state of
|
||||
the object. '''
|
||||
|
||||
s = []
|
||||
s.append(repr(self))
|
||||
s.append('command: ' + str(self.command))
|
||||
s.append('args: %r' % (self.args,))
|
||||
s.append('buffer (last %s chars): %r' % (self.str_last_chars,self.buffer[-self.str_last_chars:]))
|
||||
s.append('before (last %s chars): %r' % (self.str_last_chars,self.before[-self.str_last_chars:] if self.before else ''))
|
||||
s.append('after: %r' % (self.after,))
|
||||
s.append('match: %r' % (self.match,))
|
||||
s.append('match_index: ' + str(self.match_index))
|
||||
s.append('exitstatus: ' + str(self.exitstatus))
|
||||
if hasattr(self, 'ptyproc'):
|
||||
s.append('flag_eof: ' + str(self.flag_eof))
|
||||
s.append('pid: ' + str(self.pid))
|
||||
s.append('child_fd: ' + str(self.child_fd))
|
||||
s.append('closed: ' + str(self.closed))
|
||||
s.append('timeout: ' + str(self.timeout))
|
||||
s.append('delimiter: ' + str(self.delimiter))
|
||||
s.append('logfile: ' + str(self.logfile))
|
||||
s.append('logfile_read: ' + str(self.logfile_read))
|
||||
s.append('logfile_send: ' + str(self.logfile_send))
|
||||
s.append('maxread: ' + str(self.maxread))
|
||||
s.append('ignorecase: ' + str(self.ignorecase))
|
||||
s.append('searchwindowsize: ' + str(self.searchwindowsize))
|
||||
s.append('delaybeforesend: ' + str(self.delaybeforesend))
|
||||
s.append('delayafterclose: ' + str(self.delayafterclose))
|
||||
s.append('delayafterterminate: ' + str(self.delayafterterminate))
|
||||
return '\n'.join(s)
|
||||
|
||||
def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
|
||||
'''This starts the given command in a child process. This does all the
|
||||
fork/exec type of stuff for a pty. This is called by __init__. If args
|
||||
is empty then command will be parsed (split on spaces) and args will be
|
||||
set to parsed arguments. '''
|
||||
|
||||
# The pid and child_fd of this object get set by this method.
|
||||
# Note that it is difficult for this method to fail.
|
||||
# You cannot detect if the child process cannot start.
|
||||
# So the only way you can tell if the child process started
|
||||
# or not is to try to read from the file descriptor. If you get
|
||||
# EOF immediately then it means that the child is already dead.
|
||||
# That may not necessarily be bad because you may have spawned a child
|
||||
# that performs some task; creates no stdout output; and then dies.
|
||||
|
||||
# If command is an int type then it may represent a file descriptor.
|
||||
if isinstance(command, type(0)):
|
||||
raise ExceptionPexpect('Command is an int type. ' +
|
||||
'If this is a file descriptor then maybe you want to ' +
|
||||
'use fdpexpect.fdspawn which takes an existing ' +
|
||||
'file descriptor instead of a command string.')
|
||||
|
||||
if not isinstance(args, type([])):
|
||||
raise TypeError('The argument, args, must be a list.')
|
||||
|
||||
if args == []:
|
||||
self.args = split_command_line(command)
|
||||
self.command = self.args[0]
|
||||
else:
|
||||
# Make a shallow copy of the args list.
|
||||
self.args = args[:]
|
||||
self.args.insert(0, command)
|
||||
self.command = command
|
||||
|
||||
command_with_path = which(self.command, env=self.env)
|
||||
if command_with_path is None:
|
||||
raise ExceptionPexpect('The command was not found or was not ' +
|
||||
'executable: %s.' % self.command)
|
||||
self.command = command_with_path
|
||||
self.args[0] = self.command
|
||||
|
||||
self.name = '<' + ' '.join(self.args) + '>'
|
||||
|
||||
assert self.pid is None, 'The pid member must be None.'
|
||||
assert self.command is not None, 'The command member must not be None.'
|
||||
|
||||
kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
|
||||
if self.ignore_sighup:
|
||||
def preexec_wrapper():
|
||||
"Set SIGHUP to be ignored, then call the real preexec_fn"
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
if preexec_fn is not None:
|
||||
preexec_fn()
|
||||
kwargs['preexec_fn'] = preexec_wrapper
|
||||
|
||||
if dimensions is not None:
|
||||
kwargs['dimensions'] = dimensions
|
||||
|
||||
if self.encoding is not None:
|
||||
# Encode command line using the specified encoding
|
||||
self.args = [a if isinstance(a, bytes) else a.encode(self.encoding)
|
||||
for a in self.args]
|
||||
|
||||
self.ptyproc = self._spawnpty(self.args, env=self.env,
|
||||
cwd=self.cwd, **kwargs)
|
||||
|
||||
self.pid = self.ptyproc.pid
|
||||
self.child_fd = self.ptyproc.fd
|
||||
|
||||
|
||||
self.terminated = False
|
||||
self.closed = False
|
||||
|
||||
def _spawnpty(self, args, **kwargs):
|
||||
'''Spawn a pty and return an instance of PtyProcess.'''
|
||||
return ptyprocess.PtyProcess.spawn(args, **kwargs)
|
||||
|
||||
def close(self, force=True):
|
||||
'''This closes the connection with the child application. Note that
|
||||
calling close() more than once is valid. This emulates standard Python
|
||||
behavior with files. Set force to True if you want to make sure that
|
||||
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
|
||||
and SIGINT). '''
|
||||
|
||||
self.flush()
|
||||
with _wrap_ptyprocess_err():
|
||||
# PtyProcessError may be raised if it is not possible to terminate
|
||||
# the child.
|
||||
self.ptyproc.close(force=force)
|
||||
self.isalive() # Update exit status from ptyproc
|
||||
self.child_fd = -1
|
||||
self.closed = True
|
||||
|
||||
def isatty(self):
|
||||
'''This returns True if the file descriptor is open and connected to a
|
||||
tty(-like) device, else False.
|
||||
|
||||
On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
|
||||
the child pty may not appear as a terminal device. This means
|
||||
methods such as setecho(), setwinsize(), getwinsize() may raise an
|
||||
IOError. '''
|
||||
|
||||
return os.isatty(self.child_fd)
|
||||
|
||||
def waitnoecho(self, timeout=-1):
|
||||
'''This waits until the terminal ECHO flag is set False. This returns
|
||||
True if the echo mode is off. This returns False if the ECHO flag was
|
||||
not set False before the timeout. This can be used to detect when the
|
||||
child is waiting for a password. Usually a child application will turn
|
||||
off echo mode when it is waiting for the user to enter a password. For
|
||||
example, instead of expecting the "password:" prompt you can wait for
|
||||
the child to set ECHO off::
|
||||
|
||||
p = pexpect.spawn('ssh user@example.com')
|
||||
p.waitnoecho()
|
||||
p.sendline(mypassword)
|
||||
|
||||
If timeout==-1 then this method will use the value in self.timeout.
|
||||
If timeout==None then this method will block until the ECHO flag is False.
|
||||
'''
|
||||
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
if timeout is not None:
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
if not self.getecho():
|
||||
return True
|
||||
if timeout is not None and timeout < 0:
|
||||
return False
|
||||
if timeout is not None:
|
||||
timeout = end_time - time.time()
|
||||
time.sleep(0.1)
|
||||
|
||||
def getecho(self):
|
||||
'''This returns the terminal echo mode. This returns True if echo is
|
||||
on or False if echo is off. Child applications that are expecting you
|
||||
to enter a password often set ECHO False. See waitnoecho().
|
||||
|
||||
Not supported on platforms where ``isatty()`` returns False. '''
|
||||
return self.ptyproc.getecho()
|
||||
|
||||
def setecho(self, state):
|
||||
'''This sets the terminal echo mode on or off. Note that anything the
|
||||
child sent before the echo will be lost, so you should be sure that
|
||||
your input buffer is empty before you call setecho(). For example, the
|
||||
following will work as expected::
|
||||
|
||||
p = pexpect.spawn('cat') # Echo is on by default.
|
||||
p.sendline('1234') # We expect to see this twice from the child...
|
||||
p.expect(['1234']) # ... once from the tty echo...
|
||||
p.expect(['1234']) # ... and again from cat itself.
|
||||
p.setecho(False) # Turn off tty echo
|
||||
p.sendline('abcd') # We will see this only once (echoed by cat).
|
||||
p.sendline('wxyz') # We will see this only once (echoed by cat)
|
||||
p.expect(['abcd'])
|
||||
p.expect(['wxyz'])
|
||||
|
||||
The following WILL NOT WORK because the lines sent before the setecho
|
||||
will be lost::
|
||||
|
||||
p = pexpect.spawn('cat')
|
||||
p.sendline('1234')
|
||||
p.setecho(False) # Turn off tty echo
|
||||
p.sendline('abcd') # We will see this only once (echoed by cat).
|
||||
p.sendline('wxyz') # We will see this only once (echoed by cat)
|
||||
p.expect(['1234'])
|
||||
p.expect(['1234'])
|
||||
p.expect(['abcd'])
|
||||
p.expect(['wxyz'])
|
||||
|
||||
|
||||
Not supported on platforms where ``isatty()`` returns False.
|
||||
'''
|
||||
return self.ptyproc.setecho(state)
|
||||
|
||||
def read_nonblocking(self, size=1, timeout=-1):
|
||||
'''This reads at most size characters from the child application. It
|
||||
includes a timeout. If the read does not complete within the timeout
|
||||
period then a TIMEOUT exception is raised. If the end of file is read
|
||||
then an EOF exception will be raised. If a logfile is specified, a
|
||||
copy is written to that log.
|
||||
|
||||
If timeout is None then the read may block indefinitely.
|
||||
If timeout is -1 then the self.timeout value is used. If timeout is 0
|
||||
then the child is polled and if there is no data immediately ready
|
||||
then this will raise a TIMEOUT exception.
|
||||
|
||||
The timeout refers only to the amount of time to read at least one
|
||||
character. This is not affected by the 'size' parameter, so if you call
|
||||
read_nonblocking(size=100, timeout=30) and only one character is
|
||||
available right away then one character will be returned immediately.
|
||||
It will not wait for 30 seconds for another 99 characters to come in.
|
||||
|
||||
On the other hand, if there are bytes available to read immediately,
|
||||
all those bytes will be read (up to the buffer size). So, if the
|
||||
buffer size is 1 megabyte and there is 1 megabyte of data available
|
||||
to read, the buffer will be filled, regardless of timeout.
|
||||
|
||||
This is a wrapper around os.read(). It uses select.select() or
|
||||
select.poll() to implement the timeout. '''
|
||||
|
||||
if self.closed:
|
||||
raise ValueError('I/O operation on closed file.')
|
||||
|
||||
if self.use_poll:
|
||||
def select(timeout):
|
||||
return poll_ignore_interrupts([self.child_fd], timeout)
|
||||
else:
|
||||
def select(timeout):
|
||||
return select_ignore_interrupts([self.child_fd], [], [], timeout)[0]
|
||||
|
||||
# If there is data available to read right now, read as much as
|
||||
# we can. We do this to increase performance if there are a lot
|
||||
# of bytes to be read. This also avoids calling isalive() too
|
||||
# often. See also:
|
||||
# * https://github.com/pexpect/pexpect/pull/304
|
||||
# * http://trac.sagemath.org/ticket/10295
|
||||
if select(0):
|
||||
try:
|
||||
incoming = super(spawn, self).read_nonblocking(size)
|
||||
except EOF:
|
||||
# Maybe the child is dead: update some attributes in that case
|
||||
self.isalive()
|
||||
raise
|
||||
while len(incoming) < size and select(0):
|
||||
try:
|
||||
incoming += super(spawn, self).read_nonblocking(size - len(incoming))
|
||||
except EOF:
|
||||
# Maybe the child is dead: update some attributes in that case
|
||||
self.isalive()
|
||||
# Don't raise EOF, just return what we read so far.
|
||||
return incoming
|
||||
return incoming
|
||||
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
|
||||
if not self.isalive():
|
||||
# The process is dead, but there may or may not be data
|
||||
# available to read. Note that some systems such as Solaris
|
||||
# do not give an EOF when the child dies. In fact, you can
|
||||
# still try to read from the child_fd -- it will block
|
||||
# forever or until TIMEOUT. For that reason, it's important
|
||||
# to do this check before calling select() with timeout.
|
||||
if select(0):
|
||||
return super(spawn, self).read_nonblocking(size)
|
||||
self.flag_eof = True
|
||||
raise EOF('End Of File (EOF). Braindead platform.')
|
||||
elif self.__irix_hack:
|
||||
# Irix takes a long time before it realizes a child was terminated.
|
||||
# Make sure that the timeout is at least 2 seconds.
|
||||
# FIXME So does this mean Irix systems are forced to always have
|
||||
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
|
||||
if timeout is not None and timeout < 2:
|
||||
timeout = 2
|
||||
|
||||
# Because of the select(0) check above, we know that no data
|
||||
# is available right now. But if a non-zero timeout is given
|
||||
# (possibly timeout=None), we call select() with a timeout.
|
||||
if (timeout != 0) and select(timeout):
|
||||
return super(spawn, self).read_nonblocking(size)
|
||||
|
||||
if not self.isalive():
|
||||
# Some platforms, such as Irix, will claim that their
|
||||
# processes are alive; timeout on the select; and
|
||||
# then finally admit that they are not alive.
|
||||
self.flag_eof = True
|
||||
raise EOF('End of File (EOF). Very slow platform.')
|
||||
else:
|
||||
raise TIMEOUT('Timeout exceeded.')
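The timeout handling above reduces to the classic select-then-read pattern. A standalone sketch of that idea, independent of pexpect (names are illustrative):

import os
import select

def read_with_timeout(fd, size=1024, timeout=5.0):
    # Wait until fd is readable or the timeout expires, then do a single os.read().
    rlist, _, _ = select.select([fd], [], [], timeout)
    if not rlist:
        raise TimeoutError('no data within %.1f seconds' % timeout)
    data = os.read(fd, size)
    if data == b'':
        raise EOFError('end of file reached')
    return data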
|
||||
|
||||
def write(self, s):
|
||||
'''This is similar to send() except that there is no return value.
|
||||
'''
|
||||
|
||||
self.send(s)
|
||||
|
||||
def writelines(self, sequence):
|
||||
'''This calls write() for each element in the sequence. The sequence
|
||||
can be any iterable object producing strings, typically a list of
|
||||
strings. This does not add line separators. There is no return value.
|
||||
'''
|
||||
|
||||
for s in sequence:
|
||||
self.write(s)
|
||||
|
||||
def send(self, s):
|
||||
'''Sends string ``s`` to the child process, returning the number of
|
||||
bytes written. If a logfile is specified, a copy is written to that
|
||||
log.
|
||||
|
||||
The default terminal input mode is canonical processing unless set
|
||||
otherwise by the child process. This allows backspace and other line
|
||||
processing to be performed prior to transmitting to the receiving
|
||||
program. As this is buffered, there is a limited size of such buffer.
|
||||
|
||||
On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All
|
||||
other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024
|
||||
on OSX, 256 on OpenSolaris, and 1920 on FreeBSD.
|
||||
|
||||
This value may be discovered using fpathconf(3)::
|
||||
|
||||
>>> from os import fpathconf
|
||||
>>> print(fpathconf(0, 'PC_MAX_CANON'))
|
||||
256
|
||||
|
||||
On such a system, only 256 bytes may be received per line. Any
|
||||
subsequent bytes received will be discarded. BEL (``'\a'``) is then
|
||||
sent to output if IMAXBEL (termios.h) is set by the tty driver.
|
||||
This is usually enabled by default. Linux does not honor this as
|
||||
an option -- it behaves as though it is always set on.
|
||||
|
||||
Canonical input processing may be disabled altogether by executing
|
||||
a shell, then stty(1), before executing the final program::
|
||||
|
||||
>>> bash = pexpect.spawn('/bin/bash', echo=False)
|
||||
>>> bash.sendline('stty -icanon')
|
||||
>>> bash.sendline('base64')
|
||||
>>> bash.sendline('x' * 5000)
|
||||
'''
|
||||
|
||||
if self.delaybeforesend is not None:
|
||||
time.sleep(self.delaybeforesend)
|
||||
|
||||
s = self._coerce_send_string(s)
|
||||
self._log(s, 'send')
|
||||
|
||||
b = self._encoder.encode(s, final=False)
|
||||
return os.write(self.child_fd, b)
|
||||
|
||||
def sendline(self, s=''):
|
||||
'''Wraps send(), sending string ``s`` to child process, with
|
||||
``os.linesep`` automatically appended. Returns number of bytes
|
||||
written. Only a limited number of bytes may be sent for each
|
||||
line in the default terminal mode, see docstring of :meth:`send`.
|
||||
'''
|
||||
s = self._coerce_send_string(s)
|
||||
return self.send(s + self.linesep)
|
||||
|
||||
def _log_control(self, s):
|
||||
"""Write control characters to the appropriate log files"""
|
||||
if self.encoding is not None:
|
||||
s = s.decode(self.encoding, 'replace')
|
||||
self._log(s, 'send')
|
||||
|
||||
def sendcontrol(self, char):
|
||||
'''Helper method that wraps send() with mnemonic access for sending control
|
||||
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
|
||||
Ctrl-G (ASCII 7, bell, '\a')::
|
||||
|
||||
child.sendcontrol('g')
|
||||
|
||||
See also, sendintr() and sendeof().
|
||||
'''
|
||||
n, byte = self.ptyproc.sendcontrol(char)
|
||||
self._log_control(byte)
|
||||
return n
|
||||
|
||||
def sendeof(self):
|
||||
'''This sends an EOF to the child. This sends a character which causes
|
||||
the pending parent output buffer to be sent to the waiting child
|
||||
program without waiting for end-of-line. If it is the first character
|
||||
of the line, the read() in the user program returns 0, which signifies
|
||||
end-of-file. This means to work as expected a sendeof() has to be
|
||||
called at the beginning of a line. This method does not send a newline.
|
||||
It is the responsibility of the caller to ensure the eof is sent at the
|
||||
beginning of a line. '''
|
||||
|
||||
n, byte = self.ptyproc.sendeof()
|
||||
self._log_control(byte)
|
||||
|
||||
def sendintr(self):
|
||||
'''This sends a SIGINT to the child. It does not require
|
||||
the SIGINT to be the first character on a line. '''
|
||||
|
||||
n, byte = self.ptyproc.sendintr()
|
||||
self._log_control(byte)
|
||||
|
||||
@property
|
||||
def flag_eof(self):
|
||||
return self.ptyproc.flag_eof
|
||||
|
||||
@flag_eof.setter
|
||||
def flag_eof(self, value):
|
||||
self.ptyproc.flag_eof = value
|
||||
|
||||
def eof(self):
|
||||
'''This returns True if the EOF exception was ever raised.
|
||||
'''
|
||||
return self.flag_eof
|
||||
|
||||
def terminate(self, force=False):
|
||||
'''This forces a child process to terminate. It starts nicely with
|
||||
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
|
||||
returns True if the child was terminated. This returns False if the
|
||||
child could not be terminated. '''
|
||||
|
||||
if not self.isalive():
|
||||
return True
|
||||
try:
|
||||
self.kill(signal.SIGHUP)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
self.kill(signal.SIGCONT)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
self.kill(signal.SIGINT)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
if force:
|
||||
self.kill(signal.SIGKILL)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
return False
|
||||
except OSError:
|
||||
# I think there are kernel timing issues that sometimes cause
|
||||
# this to happen. I think isalive() reports True, but the
|
||||
# process is dead to the kernel.
|
||||
# Make one last attempt to see if the kernel is up to date.
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
else:
|
||||
return False
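From the caller's side the escalation above is usually driven like this; a hedged example, assuming child is an existing pexpect.spawn instance and that 'exit' is a command its program understands:

child.sendline('exit')            # give the program a chance to quit on its own
if not child.terminate():         # escalates SIGHUP, SIGCONT, SIGINT
    child.terminate(force=True)   # finally SIGKILL
child.close()
print(child.exitstatus, child.signalstatus)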
|
||||
|
||||
def wait(self):
|
||||
'''This waits until the child exits. This is a blocking call. This will
|
||||
not read any data from the child, so this will block forever if the
|
||||
child has unread output and has terminated. In other words, the child
|
||||
may have printed output then called exit(), but, the child is
|
||||
technically still alive until its output is read by the parent.
|
||||
|
||||
This method is non-blocking if :meth:`wait` has already been called
|
||||
previously or :meth:`isalive` method returns False. It simply returns
|
||||
the previously determined exit status.
|
||||
'''
|
||||
|
||||
ptyproc = self.ptyproc
|
||||
with _wrap_ptyprocess_err():
|
||||
# An exception may occur if some other process is attempting
# job control with our child pid.
|
||||
exitstatus = ptyproc.wait()
|
||||
self.status = ptyproc.status
|
||||
self.exitstatus = ptyproc.exitstatus
|
||||
self.signalstatus = ptyproc.signalstatus
|
||||
self.terminated = True
|
||||
|
||||
return exitstatus
|
||||
|
||||
def isalive(self):
|
||||
'''This tests if the child process is running or not. This is
|
||||
non-blocking. If the child was terminated then this will read the
|
||||
exitstatus or signalstatus of the child. This returns True if the child
|
||||
process appears to be running or False if not. It can take literally
|
||||
SECONDS for Solaris to return the right status. '''
|
||||
|
||||
ptyproc = self.ptyproc
|
||||
with _wrap_ptyprocess_err():
|
||||
alive = ptyproc.isalive()
|
||||
|
||||
if not alive:
|
||||
self.status = ptyproc.status
|
||||
self.exitstatus = ptyproc.exitstatus
|
||||
self.signalstatus = ptyproc.signalstatus
|
||||
self.terminated = True
|
||||
|
||||
return alive
|
||||
|
||||
def kill(self, sig):
|
||||
|
||||
'''This sends the given signal to the child application. In keeping
|
||||
with UNIX tradition it has a misleading name. It does not necessarily
|
||||
kill the child unless you send the right signal. '''
|
||||
|
||||
# Same as os.kill, but the pid is given for you.
|
||||
if self.isalive():
|
||||
os.kill(self.pid, sig)
|
||||
|
||||
def getwinsize(self):
|
||||
'''This returns the terminal window size of the child tty. The return
|
||||
value is a tuple of (rows, cols). '''
|
||||
return self.ptyproc.getwinsize()
|
||||
|
||||
def setwinsize(self, rows, cols):
|
||||
'''This sets the terminal window size of the child tty. This will cause
|
||||
a SIGWINCH signal to be sent to the child. This does not change the
|
||||
physical window size. It changes the size reported to TTY-aware
|
||||
applications like vi or curses -- applications that respond to the
|
||||
SIGWINCH signal. '''
|
||||
return self.ptyproc.setwinsize(rows, cols)
|
||||
|
||||
|
||||
def interact(self, escape_character=chr(29),
|
||||
input_filter=None, output_filter=None):
|
||||
|
||||
'''This gives control of the child process to the interactive user (the
|
||||
human at the keyboard). Keystrokes are sent to the child process, and
|
||||
the stdout and stderr output of the child process is printed. This
|
||||
simply echoes the child stdout and child stderr to the real stdout and
|
||||
it echoes the real stdin to the child stdin. When the user types the
|
||||
escape_character this method will return None. The escape_character
|
||||
will not be transmitted. The default for escape_character is
|
||||
entered as ``Ctrl - ]``, the very same as BSD telnet. To prevent
|
||||
escaping, escape_character may be set to None.
|
||||
|
||||
If a logfile is specified, then the data sent and received from the
|
||||
child process in interact mode is duplicated to the given log.
|
||||
|
||||
You may pass in optional input and output filter functions. These
|
||||
functions should take bytes array and return bytes array too. Even
|
||||
with ``encoding='utf-8'`` support, :meth:`interact` will always pass
|
||||
input_filter and output_filter bytes. You may need to wrap your
|
||||
function to decode and encode back to UTF-8.
|
||||
|
||||
The output_filter will be passed all the output from the child process.
|
||||
The input_filter will be passed all the keyboard input from the user.
|
||||
The input_filter is run BEFORE the check for the escape_character.
|
||||
|
||||
Note that if you change the window size of the parent the SIGWINCH
|
||||
signal will not be passed through to the child. If you want the child
|
||||
window size to change when the parent's window size changes then do
|
||||
something like the following example::
|
||||
|
||||
import pexpect, struct, fcntl, termios, signal, sys
|
||||
def sigwinch_passthrough (sig, data):
|
||||
s = struct.pack("HHHH", 0, 0, 0, 0)
|
||||
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
|
||||
termios.TIOCGWINSZ , s))
|
||||
if not p.closed:
|
||||
p.setwinsize(a[0],a[1])
|
||||
|
||||
# Note this 'p' is global and used in sigwinch_passthrough.
|
||||
p = pexpect.spawn('/bin/bash')
|
||||
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
|
||||
p.interact()
|
||||
'''
|
||||
|
||||
# Flush the buffer.
|
||||
self.write_to_stdout(self.buffer)
|
||||
self.stdout.flush()
|
||||
self._buffer = self.buffer_type()
|
||||
mode = tty.tcgetattr(self.STDIN_FILENO)
|
||||
tty.setraw(self.STDIN_FILENO)
|
||||
if escape_character is not None and PY3:
|
||||
escape_character = escape_character.encode('latin-1')
|
||||
try:
|
||||
self.__interact_copy(escape_character, input_filter, output_filter)
|
||||
finally:
|
||||
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
|
||||
|
||||
def __interact_writen(self, fd, data):
|
||||
'''This is used by the interact() method.
|
||||
'''
|
||||
|
||||
while data != b'' and self.isalive():
|
||||
n = os.write(fd, data)
|
||||
data = data[n:]
|
||||
|
||||
def __interact_read(self, fd):
|
||||
'''This is used by the interact() method.
|
||||
'''
|
||||
|
||||
return os.read(fd, 1000)
|
||||
|
||||
def __interact_copy(
|
||||
self, escape_character=None, input_filter=None, output_filter=None
|
||||
):
|
||||
|
||||
'''This is used by the interact() method.
|
||||
'''
|
||||
|
||||
while self.isalive():
|
||||
if self.use_poll:
|
||||
r = poll_ignore_interrupts([self.child_fd, self.STDIN_FILENO])
|
||||
else:
|
||||
r, w, e = select_ignore_interrupts(
|
||||
[self.child_fd, self.STDIN_FILENO], [], []
|
||||
)
|
||||
if self.child_fd in r:
|
||||
try:
|
||||
data = self.__interact_read(self.child_fd)
|
||||
except OSError as err:
|
||||
if err.args[0] == errno.EIO:
|
||||
# Linux-style EOF
|
||||
break
|
||||
raise
|
||||
if data == b'':
|
||||
# BSD-style EOF
|
||||
break
|
||||
if output_filter:
|
||||
data = output_filter(data)
|
||||
self._log(data, 'read')
|
||||
os.write(self.STDOUT_FILENO, data)
|
||||
if self.STDIN_FILENO in r:
|
||||
data = self.__interact_read(self.STDIN_FILENO)
|
||||
if input_filter:
|
||||
data = input_filter(data)
|
||||
i = -1
|
||||
if escape_character is not None:
|
||||
i = data.rfind(escape_character)
|
||||
if i != -1:
|
||||
data = data[:i]
|
||||
if data:
|
||||
self._log(data, 'send')
|
||||
self.__interact_writen(self.child_fd, data)
|
||||
break
|
||||
self._log(data, 'send')
|
||||
self.__interact_writen(self.child_fd, data)
|
||||
|
||||
|
||||
def spawnu(*args, **kwargs):
|
||||
"""Deprecated: pass encoding to spawn() instead."""
|
||||
kwargs.setdefault('encoding', 'utf-8')
|
||||
return spawn(*args, **kwargs)
|
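Tying the spawn API together, a compact hedged example of an interactive session; the host name and prompts are illustrative only:

import pexpect

child = pexpect.spawn('ftp ftp.example.com', encoding='utf-8', timeout=30)
child.expect('Name .*: ')
child.sendline('anonymous')
child.expect('Password:')
child.sendline('noah@example.com')
child.expect('ftp> ')
child.sendline('bye')
child.expect(pexpect.EOF)
child.close()
print(child.exitstatus, child.signalstatus)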
19
plugins/git_clone/pexpect/ptyprocess/LICENSE
Normal file
@ -0,0 +1,19 @@
ISC LICENSE

This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt

Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <noah@noah.org>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
4
plugins/git_clone/pexpect/ptyprocess/__init__.py
Normal file
@ -0,0 +1,4 @@
"""Run a subprocess in a pseudo terminal"""
from .ptyprocess import PtyProcess, PtyProcessUnicode, PtyProcessError

__version__ = '0.7.0'
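The package's main entry point is the PtyProcess.spawn classmethod; a minimal hedged sketch (the import path assumes the standalone ptyprocess package, here vendored under pexpect/):

from ptyprocess import PtyProcess

p = PtyProcess.spawn(['ls', '-l'])
try:
    while True:
        # read() returns bytes from the pty and raises EOFError when the child is done
        print(p.read(1024).decode('utf-8', 'replace'), end='')
except EOFError:
    pass
p.wait()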
78
plugins/git_clone/pexpect/ptyprocess/_fork_pty.py
Normal file
@ -0,0 +1,78 @@
"""Substitute for the forkpty system call, to support Solaris.
"""
import os
import errno

from pty import (STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, CHILD)
from .util import PtyProcessError


def fork_pty():
    '''This implements a substitute for the forkpty system call. This
    should be more portable than the pty.fork() function. Specifically,
    this should work on Solaris.

    Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
    resolve the issue with Python's pty.fork() not supporting Solaris,
    particularly ssh. Based on patch to posixmodule.c authored by Noah
    Spurrier::

        http://mail.python.org/pipermail/python-dev/2003-May/035281.html

    '''

    parent_fd, child_fd = os.openpty()
    if parent_fd < 0 or child_fd < 0:
        raise OSError("os.openpty() failed")

    pid = os.fork()
    if pid == CHILD:
        # Child.
        os.close(parent_fd)
        pty_make_controlling_tty(child_fd)

        os.dup2(child_fd, STDIN_FILENO)
        os.dup2(child_fd, STDOUT_FILENO)
        os.dup2(child_fd, STDERR_FILENO)

    else:
        # Parent.
        os.close(child_fd)

    return pid, parent_fd


def pty_make_controlling_tty(tty_fd):
    '''This makes the pseudo-terminal the controlling tty. This should be
    more portable than the pty.fork() function. Specifically, this should
    work on Solaris. '''

    child_name = os.ttyname(tty_fd)

    # Disconnect from controlling tty, if any. Raises OSError of ENXIO
    # if there was no controlling tty to begin with, such as when
    # executed by a cron(1) job.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise

    os.setsid()

    # Verify we are disconnected from controlling tty by attempting to open
    # it again. We expect that OSError of ENXIO should always be raised.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
        raise PtyProcessError("OSError of errno.ENXIO should be raised.")
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise

    # Verify we can open child pty.
    fd = os.open(child_name, os.O_RDWR)
    os.close(fd)

    # Verify we now have a controlling tty.
    fd = os.open("/dev/tty", os.O_WRONLY)
    os.close(fd)
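A hedged sketch of how fork_pty() is typically used; it mirrors, in a greatly simplified form, what PtyProcess.spawn does (the import path assumes the vendored layout shown in this commit):

import os
from pexpect.ptyprocess import _fork_pty

pid, fd = _fork_pty.fork_pty()
if pid == 0:
    # Child: stdin/stdout/stderr are already attached to the slave end of the pty.
    os.execvp('echo', ['echo', 'hello from the pty'])
else:
    # Parent: read the child's output from the master end, then reap it.
    print(os.read(fd, 1024))
    os.waitpid(pid, 0)
    os.close(fd)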
855
plugins/git_clone/pexpect/ptyprocess/ptyprocess.py
Normal file
@ -0,0 +1,855 @@
|
||||
import codecs
|
||||
import errno
|
||||
import fcntl
|
||||
import io
|
||||
import os
|
||||
import pty
|
||||
import resource
|
||||
import signal
|
||||
import struct
|
||||
import sys
|
||||
import termios
|
||||
import time
|
||||
|
||||
try:
|
||||
import builtins # Python 3
|
||||
except ImportError:
|
||||
import __builtin__ as builtins # Python 2
|
||||
|
||||
# Constants
|
||||
from pty import (STDIN_FILENO, CHILD)
|
||||
|
||||
from .util import which, PtyProcessError
|
||||
|
||||
_platform = sys.platform.lower()
|
||||
|
||||
# Solaris uses internal __fork_pty(). All others use pty.fork().
|
||||
_is_solaris = (
|
||||
_platform.startswith('solaris') or
|
||||
_platform.startswith('sunos'))
|
||||
|
||||
if _is_solaris:
|
||||
use_native_pty_fork = False
|
||||
from . import _fork_pty
|
||||
else:
|
||||
use_native_pty_fork = True
|
||||
|
||||
PY3 = sys.version_info[0] >= 3
|
||||
|
||||
if PY3:
|
||||
def _byte(i):
|
||||
return bytes([i])
|
||||
else:
|
||||
def _byte(i):
|
||||
return chr(i)
|
||||
|
||||
class FileNotFoundError(OSError): pass
|
||||
class TimeoutError(OSError): pass
|
||||
|
||||
_EOF, _INTR = None, None
|
||||
|
||||
def _make_eof_intr():
|
||||
"""Set constants _EOF and _INTR.
|
||||
|
||||
This avoids doing potentially costly operations on module load.
|
||||
"""
|
||||
global _EOF, _INTR
|
||||
if (_EOF is not None) and (_INTR is not None):
|
||||
return
|
||||
|
||||
# inherit EOF and INTR definitions from controlling process.
|
||||
try:
|
||||
from termios import VEOF, VINTR
|
||||
fd = None
|
||||
for name in 'stdin', 'stdout':
|
||||
stream = getattr(sys, '__%s__' % name, None)
|
||||
if stream is None or not hasattr(stream, 'fileno'):
|
||||
continue
|
||||
try:
|
||||
fd = stream.fileno()
|
||||
except ValueError:
|
||||
continue
|
||||
if fd is None:
|
||||
# no fd, raise ValueError to fallback on CEOF, CINTR
|
||||
raise ValueError("No stream has a fileno")
|
||||
intr = ord(termios.tcgetattr(fd)[6][VINTR])
|
||||
eof = ord(termios.tcgetattr(fd)[6][VEOF])
|
||||
except (ImportError, OSError, IOError, ValueError, termios.error):
|
||||
# unless the controlling process is also not a terminal,
|
||||
# such as cron(1), or when stdin and stdout are both closed.
|
||||
# Fall back to using CEOF and CINTR.
|
||||
try:
|
||||
from termios import CEOF, CINTR
|
||||
(intr, eof) = (CINTR, CEOF)
|
||||
except ImportError:
|
||||
# ^C, ^D
|
||||
(intr, eof) = (3, 4)
|
||||
|
||||
_INTR = _byte(intr)
|
||||
_EOF = _byte(eof)
|
||||
|
||||
# setecho and setwinsize are pulled out here because on some platforms, we need
|
||||
# to do this from the child before we exec()
|
||||
|
||||
def _setecho(fd, state):
|
||||
errmsg = 'setecho() may not be called on this platform (it may still be possible to enable/disable echo when spawning the child process)'
|
||||
|
||||
try:
|
||||
attr = termios.tcgetattr(fd)
|
||||
except termios.error as err:
|
||||
if err.args[0] == errno.EINVAL:
|
||||
raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
|
||||
raise
|
||||
|
||||
if state:
|
||||
attr[3] = attr[3] | termios.ECHO
|
||||
else:
|
||||
attr[3] = attr[3] & ~termios.ECHO
|
||||
|
||||
try:
|
||||
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
|
||||
# blocked on some platforms. TCSADRAIN would probably be ideal.
|
||||
termios.tcsetattr(fd, termios.TCSANOW, attr)
|
||||
except IOError as err:
|
||||
if err.args[0] == errno.EINVAL:
|
||||
raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
|
||||
raise
|
||||
|
||||
def _setwinsize(fd, rows, cols):
|
||||
# Some very old platforms have a bug that causes the value for
|
||||
# termios.TIOCSWINSZ to be truncated. There was a hack here to work
|
||||
# around this, but it caused problems with newer platforms so has been
|
||||
# removed. For details see https://github.com/pexpect/pexpect/issues/39
|
||||
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
|
||||
# Note, assume ws_xpixel and ws_ypixel are zero.
|
||||
s = struct.pack('HHHH', rows, cols, 0, 0)
|
||||
fcntl.ioctl(fd, TIOCSWINSZ, s)
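For reference, reading the window size back uses TIOCGWINSZ through the same ioctl mechanism; a small hedged sketch (a numeric fallback is included in the same spirit as the TIOCSWINSZ fallback above):

import fcntl
import struct
import termios

def _getwinsize(fd):
    # Returns (rows, cols) as reported by the tty driver for fd.
    TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
    s = struct.pack('HHHH', 0, 0, 0, 0)
    x = fcntl.ioctl(fd, TIOCGWINSZ, s)
    return struct.unpack('HHHH', x)[0:2]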
|
||||
|
||||
class PtyProcess(object):
|
||||
'''This class represents a process running in a pseudoterminal.
|
||||
|
||||
The main constructor is the :meth:`spawn` classmethod.
|
||||
'''
|
||||
string_type = bytes
|
||||
if PY3:
|
||||
linesep = os.linesep.encode('ascii')
|
||||
crlf = '\r\n'.encode('ascii')
|
||||
|
||||
@staticmethod
|
||||
def write_to_stdout(b):
|
||||
try:
|
||||
return sys.stdout.buffer.write(b)
|
||||
except AttributeError:
|
||||
# If stdout has been replaced, it may not have .buffer
|
||||
return sys.stdout.write(b.decode('ascii', 'replace'))
|
||||
else:
|
||||
linesep = os.linesep
|
||||
crlf = '\r\n'
|
||||
write_to_stdout = sys.stdout.write
|
||||
|
||||
encoding = None
|
||||
|
||||
argv = None
|
||||
env = None
|
||||
launch_dir = None
|
||||
|
||||
def __init__(self, pid, fd):
|
||||
_make_eof_intr() # Ensure _EOF and _INTR are calculated
|
||||
self.pid = pid
|
||||
self.fd = fd
|
||||
readf = io.open(fd, 'rb', buffering=0)
|
||||
writef = io.open(fd, 'wb', buffering=0, closefd=False)
|
||||
self.fileobj = io.BufferedRWPair(readf, writef)
|
||||
|
||||
self.terminated = False
|
||||
self.closed = False
|
||||
self.exitstatus = None
|
||||
self.signalstatus = None
|
||||
# status returned by os.waitpid
|
||||
self.status = None
|
||||
self.flag_eof = False
|
||||
# Used by close() to give kernel time to update process status.
|
||||
# Time in seconds.
|
||||
self.delayafterclose = 0.1
|
||||
# Used by terminate() to give kernel time to update process status.
|
||||
# Time in seconds.
|
||||
self.delayafterterminate = 0.1
|
||||
|
||||
@classmethod
|
||||
def spawn(
|
||||
cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
|
||||
dimensions=(24, 80), pass_fds=()):
|
||||
'''Start the given command in a child process in a pseudo terminal.
|
||||
|
||||
This does all the fork/exec type of stuff for a pty, and returns an
|
||||
instance of PtyProcess.
|
||||
|
||||
If preexec_fn is supplied, it will be called with no arguments in the
|
||||
child process before exec-ing the specified command.
|
||||
It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
|
||||
|
||||
Dimensions of the pseudoterminal used for the subprocess can be
|
||||
specified as a tuple (rows, cols), or the default (24, 80) will be used.
|
||||
|
||||
By default, all file descriptors except 0, 1 and 2 are closed. This
|
||||
behavior can be overridden with pass_fds, a list of file descriptors to
|
||||
keep open between the parent and the child.
|
||||
'''
|
||||
# Note that it is difficult for this method to fail.
|
||||
# You cannot detect if the child process cannot start.
|
||||
# So the only way you can tell if the child process started
|
||||
# or not is to try to read from the file descriptor. If you get
|
||||
# EOF immediately then it means that the child is already dead.
|
||||
# That may not necessarily be bad because you may have spawned a child
|
||||
# that performs some task; creates no stdout output; and then dies.
|
||||
|
||||
if not isinstance(argv, (list, tuple)):
|
||||
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
|
||||
|
||||
# Shallow copy of argv so we can modify it
|
||||
argv = argv[:]
|
||||
command = argv[0]
|
||||
|
||||
command_with_path = which(command)
|
||||
if command_with_path is None:
|
||||
raise FileNotFoundError('The command was not found or was not ' +
|
||||
'executable: %s.' % command)
|
||||
command = command_with_path
|
||||
argv[0] = command
|
||||
|
||||
# [issue #119] To prevent the case where exec fails and the user is
|
||||
# stuck interacting with a python child process instead of whatever
|
||||
# was expected, we implement the solution from
|
||||
# http://stackoverflow.com/a/3703179 to pass the exception to the
|
||||
# parent process
|
||||
|
||||
# [issue #119] 1. Before forking, open a pipe in the parent process.
|
||||
exec_err_pipe_read, exec_err_pipe_write = os.pipe()
|
||||
|
||||
if use_native_pty_fork:
|
||||
pid, fd = pty.fork()
|
||||
else:
|
||||
# Use internal fork_pty, for Solaris
|
||||
pid, fd = _fork_pty.fork_pty()
|
||||
|
||||
# Some platforms must call setwinsize() and setecho() from the
|
||||
# child process, and others from the master process. We do both,
|
||||
# allowing IOError for either.
|
||||
|
||||
if pid == CHILD:
|
||||
# set window size
|
||||
try:
|
||||
_setwinsize(STDIN_FILENO, *dimensions)
|
||||
except IOError as err:
|
||||
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
|
||||
raise
|
||||
|
||||
# disable echo if spawn argument echo was unset
|
||||
if not echo:
|
||||
try:
|
||||
_setecho(STDIN_FILENO, False)
|
||||
except (IOError, termios.error) as err:
|
||||
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
|
||||
raise
|
||||
|
||||
# [issue #119] 3. The child closes the reading end and sets the
|
||||
# close-on-exec flag for the writing end.
|
||||
os.close(exec_err_pipe_read)
|
||||
fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
|
||||
|
||||
# Do not allow child to inherit open file descriptors from parent,
|
||||
# with the exception of the exec_err_pipe_write of the pipe
|
||||
# and pass_fds.
|
||||
# Impose ceiling on max_fd: AIX bugfix for users with unlimited
|
||||
# nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
|
||||
# occasionally raises out of range error
|
||||
max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0])
|
||||
spass_fds = sorted(set(pass_fds) | {exec_err_pipe_write})
|
||||
for pair in zip([2] + spass_fds, spass_fds + [max_fd]):
|
||||
os.closerange(pair[0]+1, pair[1])
|
||||
|
||||
if cwd is not None:
|
||||
os.chdir(cwd)
|
||||
|
||||
if preexec_fn is not None:
|
||||
try:
|
||||
preexec_fn()
|
||||
except Exception as e:
|
||||
ename = type(e).__name__
|
||||
tosend = '{}:0:{}'.format(ename, str(e))
|
||||
if PY3:
|
||||
tosend = tosend.encode('utf-8')
|
||||
|
||||
os.write(exec_err_pipe_write, tosend)
|
||||
os.close(exec_err_pipe_write)
|
||||
os._exit(1)
|
||||
|
||||
try:
|
||||
if env is None:
|
||||
os.execv(command, argv)
|
||||
else:
|
||||
os.execvpe(command, argv, env)
|
||||
except OSError as err:
|
||||
# [issue #119] 5. If exec fails, the child writes the error
|
||||
# code back to the parent using the pipe, then exits.
|
||||
tosend = 'OSError:{}:{}'.format(err.errno, str(err))
|
||||
if PY3:
|
||||
tosend = tosend.encode('utf-8')
|
||||
os.write(exec_err_pipe_write, tosend)
|
||||
os.close(exec_err_pipe_write)
|
||||
os._exit(os.EX_OSERR)
|
||||
|
||||
# Parent
|
||||
inst = cls(pid, fd)
|
||||
|
||||
# Set some informational attributes
|
||||
inst.argv = argv
|
||||
if env is not None:
|
||||
inst.env = env
|
||||
if cwd is not None:
|
||||
inst.launch_dir = cwd
|
||||
|
||||
# [issue #119] 2. After forking, the parent closes the writing end
|
||||
# of the pipe and reads from the reading end.
|
||||
os.close(exec_err_pipe_write)
|
||||
exec_err_data = os.read(exec_err_pipe_read, 4096)
|
||||
os.close(exec_err_pipe_read)
|
||||
|
||||
# [issue #119] 6. The parent reads eof (a zero-length read) if the
|
||||
# child successfully performed exec, since close-on-exec made
|
||||
# successful exec close the writing end of the pipe. Or, if exec
|
||||
# failed, the parent reads the error code and can proceed
|
||||
# accordingly. Either way, the parent blocks until the child calls
|
||||
# exec.
|
||||
if len(exec_err_data) != 0:
|
||||
try:
|
||||
errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
|
||||
exctype = getattr(builtins, errclass.decode('ascii'), Exception)
|
||||
|
||||
exception = exctype(errmsg.decode('utf-8', 'replace'))
|
||||
if exctype is OSError:
|
||||
exception.errno = int(errno_s)
|
||||
except:
|
||||
raise Exception('Subprocess failed, got bad error data: %r'
|
||||
% exec_err_data)
|
||||
else:
|
||||
raise exception
|
||||
|
||||
try:
|
||||
inst.setwinsize(*dimensions)
|
||||
except IOError as err:
|
||||
if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
|
||||
raise
|
||||
|
||||
return inst
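
A hedged illustration of calling the classmethod above (a sketch for a POSIX host; the directory and environment values are made up for the example):

    # Illustration only: /tmp and the env(1) utility are assumed to exist.
    p = PtyProcess.spawn(['env'], env={'ANSWER': '42'}, cwd='/tmp', echo=False)
    print(repr(p))   # e.g. PtyProcess.spawn(['/usr/bin/env'], env={'ANSWER': '42'}, cwd='/tmp')
    print(p.read())  # the child's environment dump, including ANSWER=42
    p.wait()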
|
||||
|
||||
def __repr__(self):
|
||||
clsname = type(self).__name__
|
||||
if self.argv is not None:
|
||||
args = [repr(self.argv)]
|
||||
if self.env is not None:
|
||||
args.append("env=%r" % self.env)
|
||||
if self.launch_dir is not None:
|
||||
args.append("cwd=%r" % self.launch_dir)
|
||||
|
||||
return "{}.spawn({})".format(clsname, ", ".join(args))
|
||||
|
||||
else:
|
||||
return "{}(pid={}, fd={})".format(clsname, self.pid, self.fd)
|
||||
|
||||
@staticmethod
|
||||
def _coerce_send_string(s):
|
||||
if not isinstance(s, bytes):
|
||||
return s.encode('utf-8')
|
||||
return s
|
||||
|
||||
@staticmethod
|
||||
def _coerce_read_string(s):
|
||||
return s
|
||||
|
||||
def __del__(self):
|
||||
'''This makes sure that no system resources are left open. Python only
|
||||
garbage collects Python objects. OS file descriptors are not Python
|
||||
objects, so they must be handled explicitly. If the child file
|
||||
descriptor was opened outside of this class (passed to the constructor)
|
||||
then this does not close it. '''
|
||||
|
||||
if not self.closed:
|
||||
# It is possible for __del__ methods to execute during the
|
||||
# teardown of the Python VM itself. Thus self.close() may
|
||||
# trigger an exception because os.close may be None.
|
||||
try:
|
||||
self.close()
|
||||
# which exception, shouldn't we catch explicitly .. ?
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def fileno(self):
|
||||
'''This returns the file descriptor of the pty for the child.
|
||||
'''
|
||||
return self.fd
|
||||
|
||||
def close(self, force=True):
|
||||
'''This closes the connection with the child application. Note that
|
||||
calling close() more than once is valid. This emulates standard Python
|
||||
behavior with files. Set force to True if you want to make sure that
|
||||
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
|
||||
and SIGINT). '''
|
||||
if not self.closed:
|
||||
self.flush()
|
||||
self.fileobj.close() # Closes the file descriptor
|
||||
# Give kernel time to update process status.
|
||||
time.sleep(self.delayafterclose)
|
||||
if self.isalive():
|
||||
if not self.terminate(force):
|
||||
raise PtyProcessError('Could not terminate the child.')
|
||||
self.fd = -1
|
||||
self.closed = True
|
||||
#self.pid = None
|
||||
|
||||
def flush(self):
|
||||
'''This does nothing. It is here to support the interface for a
|
||||
File-like object. '''
|
||||
|
||||
pass
|
||||
|
||||
def isatty(self):
|
||||
'''This returns True if the file descriptor is open and connected to a
|
||||
tty(-like) device, else False.
|
||||
|
||||
On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
|
||||
the child pty may not appear as a terminal device. This means
|
||||
methods such as setecho(), setwinsize(), getwinsize() may raise an
|
||||
IOError. '''
|
||||
|
||||
return os.isatty(self.fd)
|
||||
|
||||
def waitnoecho(self, timeout=None):
|
||||
'''Wait until the terminal ECHO flag is set False.
|
||||
|
||||
This returns True if the echo mode is off, or False if echo was not
|
||||
disabled before the timeout. This can be used to detect when the
|
||||
child is waiting for a password. Usually a child application will turn
|
||||
off echo mode when it is waiting for the user to enter a password. For
|
||||
example, instead of expecting the "password:" prompt you can wait for
|
||||
the child to turn echo off::
|
||||
|
||||
p = pexpect.spawn('ssh user@example.com')
|
||||
p.waitnoecho()
|
||||
p.sendline(mypassword)
|
||||
|
||||
If ``timeout=None`` then this method blocks until the ECHO flag is False.
|
||||
'''
|
||||
|
||||
if timeout is not None:
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
if not self.getecho():
|
||||
return True
|
||||
if timeout is not None and timeout < 0:  # check for None before comparing
|
||||
return False
|
||||
if timeout is not None:
|
||||
timeout = end_time - time.time()
|
||||
time.sleep(0.1)
|
||||
|
||||
def getecho(self):
|
||||
'''Returns True if terminal echo is on, or False if echo is off.
|
||||
|
||||
Child applications that are expecting you to enter a password often
|
||||
disable echo. See also :meth:`waitnoecho`.
|
||||
|
||||
Not supported on platforms where ``isatty()`` returns False.
|
||||
'''
|
||||
|
||||
try:
|
||||
attr = termios.tcgetattr(self.fd)
|
||||
except termios.error as err:
|
||||
errmsg = 'getecho() may not be called on this platform'
|
||||
if err.args[0] == errno.EINVAL:
|
||||
raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
|
||||
raise
|
||||
|
||||
self.echo = bool(attr[3] & termios.ECHO)
|
||||
return self.echo
|
||||
|
||||
def setecho(self, state):
|
||||
'''Enable or disable terminal echo.
|
||||
|
||||
Anything the child sent before the echo will be lost, so you should be
|
||||
sure that your input buffer is empty before you call setecho().
|
||||
For example, the following will work as expected::
|
||||
|
||||
p = pexpect.spawn('cat') # Echo is on by default.
|
||||
p.sendline('1234') # We expect to see this twice from the child...
|
||||
p.expect(['1234']) # ... once from the tty echo...
|
||||
p.expect(['1234']) # ... and again from cat itself.
|
||||
p.setecho(False) # Turn off tty echo
|
||||
p.sendline('abcd') # We will see this only once (echoed by cat).
|
||||
p.sendline('wxyz') # We will see this only once (echoed by cat).
|
||||
p.expect(['abcd'])
|
||||
p.expect(['wxyz'])
|
||||
|
||||
The following WILL NOT WORK because the lines sent before the setecho
|
||||
will be lost::
|
||||
|
||||
p = pexpect.spawn('cat')
|
||||
p.sendline('1234')
|
||||
p.setecho(False) # Turn off tty echo
|
||||
p.sendline('abcd') # We will see this only once (echoed by cat).
|
||||
p.sendline('wxyz') # We will see this only once (echoed by cat).
|
||||
p.expect(['1234'])
|
||||
p.expect(['1234'])
|
||||
p.expect(['abcd'])
|
||||
p.expect(['wxyz'])
|
||||
|
||||
|
||||
Not supported on platforms where ``isatty()`` returns False.
|
||||
'''
|
||||
_setecho(self.fd, state)
|
||||
|
||||
self.echo = state
|
||||
|
||||
def read(self, size=1024):
|
||||
"""Read and return at most ``size`` bytes from the pty.
|
||||
|
||||
Can block if there is nothing to read. Raises :exc:`EOFError` if the
|
||||
terminal was closed.
|
||||
|
||||
Unlike Pexpect's ``read_nonblocking`` method, this doesn't try to deal
|
||||
with the vagaries of EOF on platforms that do strange things, like IRIX
|
||||
or older Solaris systems. It handles the errno=EIO pattern used on
|
||||
Linux, and the empty-string return used on BSD platforms and (seemingly)
|
||||
on recent Solaris.
|
||||
"""
|
||||
try:
|
||||
s = self.fileobj.read1(size)
|
||||
except (OSError, IOError) as err:
|
||||
if err.args[0] == errno.EIO:
|
||||
# Linux-style EOF
|
||||
self.flag_eof = True
|
||||
raise EOFError('End Of File (EOF). Exception style platform.')
|
||||
raise
|
||||
if s == b'':
|
||||
# BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
|
||||
self.flag_eof = True
|
||||
raise EOFError('End Of File (EOF). Empty string style platform.')
|
||||
|
||||
return s
|
||||
|
||||
def readline(self):
|
||||
"""Read one line from the pseudoterminal, and return it as unicode.
|
||||
|
||||
Can block if there is nothing to read. Raises :exc:`EOFError` if the
|
||||
terminal was closed.
|
||||
"""
|
||||
try:
|
||||
s = self.fileobj.readline()
|
||||
except (OSError, IOError) as err:
|
||||
if err.args[0] == errno.EIO:
|
||||
# Linux-style EOF
|
||||
self.flag_eof = True
|
||||
raise EOFError('End Of File (EOF). Exception style platform.')
|
||||
raise
|
||||
if s == b'':
|
||||
# BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
|
||||
self.flag_eof = True
|
||||
raise EOFError('End Of File (EOF). Empty string style platform.')
|
||||
|
||||
return s
|
||||
|
||||
def _writeb(self, b, flush=True):
|
||||
n = self.fileobj.write(b)
|
||||
if flush:
|
||||
self.fileobj.flush()
|
||||
return n
|
||||
|
||||
def write(self, s, flush=True):
|
||||
"""Write bytes to the pseudoterminal.
|
||||
|
||||
Returns the number of bytes written.
|
||||
"""
|
||||
return self._writeb(s, flush=flush)
|
||||
|
||||
def sendcontrol(self, char):
|
||||
'''Helper method for sending control characters to the terminal.
|
||||
|
||||
For example, to send Ctrl-G (ASCII 7, bell, ``'\\a'``)::
|
||||
|
||||
child.sendcontrol('g')
|
||||
|
||||
See also, :meth:`sendintr` and :meth:`sendeof`.
|
||||
'''
|
||||
char = char.lower()
|
||||
a = ord(char)
|
||||
if 97 <= a <= 122:
|
||||
a = a - ord('a') + 1
|
||||
byte = _byte(a)
|
||||
return self._writeb(byte), byte
|
||||
d = {'@': 0, '`': 0,
|
||||
'[': 27, '{': 27,
|
||||
'\\': 28, '|': 28,
|
||||
']': 29, '}': 29,
|
||||
'^': 30, '~': 30,
|
||||
'_': 31,
|
||||
'?': 127}
|
||||
if char not in d:
|
||||
return 0, b''
|
||||
|
||||
byte = _byte(d[char])
|
||||
return self._writeb(byte), byte
|
||||
|
||||
def sendeof(self):
|
||||
'''Sends an EOF (typically Ctrl-D) through the terminal.
|
||||
|
||||
This sends a character which causes
|
||||
the pending parent output buffer to be sent to the waiting child
|
||||
program without waiting for end-of-line. If it is the first character
|
||||
of the line, the read() in the user program returns 0, which signifies
|
||||
end-of-file. This means to work as expected a sendeof() has to be
|
||||
called at the beginning of a line. This method does not send a newline.
|
||||
It is the responsibility of the caller to ensure the eof is sent at the
|
||||
beginning of a line.
|
||||
'''
|
||||
return self._writeb(_EOF), _EOF
|
||||
|
||||
def sendintr(self):
|
||||
'''Send an interrupt character (typically Ctrl-C) through the terminal.
|
||||
|
||||
This will normally trigger the kernel to send SIGINT to the current
|
||||
foreground process group. Processes can turn off this translation, in
|
||||
which case they can read the raw data sent, e.g. ``b'\\x03'`` for Ctrl-C.
|
||||
|
||||
See also the :meth:`kill` method, which sends a signal directly to the
|
||||
immediate child process in the terminal (which is not necessarily the
|
||||
foreground process).
|
||||
'''
|
||||
return self._writeb(_INTR), _INTR
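
As a short, hedged sketch of how the helpers above fit together (sendcontrol/sendeof/sendintr), assuming a POSIX host with the cat utility; this is not part of the original module:

    from ptyprocess import PtyProcess   # assumed import path for this package

    p = PtyProcess.spawn(['cat'])
    p.write(b'hello\n')      # cat echoes the line back through the pty
    print(p.read())          # typically b'hello\r\nhello\r\n' while tty echo is on
    p.sendcontrol('d')       # same effect as p.sendeof(): cat reads EOF and exits
    # p.sendintr() would instead deliver Ctrl-C, letting the kernel raise SIGINT
    p.wait()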
|
||||
|
||||
def eof(self):
|
||||
'''This returns True if the EOF exception was ever raised.
|
||||
'''
|
||||
|
||||
return self.flag_eof
|
||||
|
||||
def terminate(self, force=False):
|
||||
'''This forces a child process to terminate. It starts nicely with
|
||||
SIGHUP and SIGINT. If "force" is True then it moves on to SIGKILL. This
|
||||
returns True if the child was terminated. This returns False if the
|
||||
child could not be terminated. '''
|
||||
|
||||
if not self.isalive():
|
||||
return True
|
||||
try:
|
||||
self.kill(signal.SIGHUP)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
self.kill(signal.SIGCONT)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
self.kill(signal.SIGINT)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
if force:
|
||||
self.kill(signal.SIGKILL)
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
return False
|
||||
except OSError:
|
||||
# I think there are kernel timing issues that sometimes cause
|
||||
# this to happen. I think isalive() reports True, but the
|
||||
# process is dead to the kernel.
|
||||
# Make one last attempt to see if the kernel is up to date.
|
||||
time.sleep(self.delayafterterminate)
|
||||
if not self.isalive():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def wait(self):
|
||||
'''This waits until the child exits. This is a blocking call. This will
|
||||
not read any data from the child, so this will block forever if the
|
||||
child has unread output and has terminated. In other words, the child
|
||||
may have printed output then called exit(), but the child is
|
||||
technically still alive until its output is read by the parent. '''
|
||||
|
||||
if self.isalive():
|
||||
pid, status = os.waitpid(self.pid, 0)
|
||||
else:
|
||||
return self.exitstatus
|
||||
self.exitstatus = os.WEXITSTATUS(status)
|
||||
if os.WIFEXITED(status):
|
||||
self.status = status
|
||||
self.exitstatus = os.WEXITSTATUS(status)
|
||||
self.signalstatus = None
|
||||
self.terminated = True
|
||||
elif os.WIFSIGNALED(status):
|
||||
self.status = status
|
||||
self.exitstatus = None
|
||||
self.signalstatus = os.WTERMSIG(status)
|
||||
self.terminated = True
|
||||
elif os.WIFSTOPPED(status): # pragma: no cover
|
||||
# You can't call wait() on a child process in the stopped state.
|
||||
raise PtyProcessError('Called wait() on a stopped child ' +
|
||||
'process. This is not supported. Is some other ' +
|
||||
'process attempting job control with our child pid?')
|
||||
return self.exitstatus
|
||||
|
||||
def isalive(self):
|
||||
'''This tests if the child process is running or not. This is
|
||||
non-blocking. If the child was terminated then this will read the
|
||||
exitstatus or signalstatus of the child. This returns True if the child
|
||||
process appears to be running or False if not. It can take literally
|
||||
SECONDS for Solaris to return the right status. '''
|
||||
|
||||
if self.terminated:
|
||||
return False
|
||||
|
||||
if self.flag_eof:
|
||||
# This is for Linux, which requires the blocking form
|
||||
# of waitpid to get the status of a defunct process.
|
||||
# This is super-lame. The flag_eof would have been set
|
||||
# in read_nonblocking(), so this should be safe.
|
||||
waitpid_options = 0
|
||||
else:
|
||||
waitpid_options = os.WNOHANG
|
||||
|
||||
try:
|
||||
pid, status = os.waitpid(self.pid, waitpid_options)
|
||||
except OSError as e:
|
||||
# No child processes
|
||||
if e.errno == errno.ECHILD:
|
||||
raise PtyProcessError('isalive() encountered condition ' +
|
||||
'where "terminated" is 0, but there was no child ' +
|
||||
'process. Did someone else call waitpid() ' +
|
||||
'on our process?')
|
||||
else:
|
||||
raise
|
||||
|
||||
# I have to do this twice for Solaris.
|
||||
# I can't even believe that I figured this out...
|
||||
# If waitpid() returns 0 it means that no child process
|
||||
# wishes to report, and the value of status is undefined.
|
||||
if pid == 0:
|
||||
try:
|
||||
### os.WNOHANG) # Solaris!
|
||||
pid, status = os.waitpid(self.pid, waitpid_options)
|
||||
except OSError as e: # pragma: no cover
|
||||
# This should never happen...
|
||||
if e.errno == errno.ECHILD:
|
||||
raise PtyProcessError('isalive() encountered condition ' +
|
||||
'that should never happen. There was no child ' +
|
||||
'process. Did someone else call waitpid() ' +
|
||||
'on our process?')
|
||||
else:
|
||||
raise
|
||||
|
||||
# If pid is still 0 after two calls to waitpid() then the process
|
||||
# really is alive. This seems to work on all platforms, except for
|
||||
# Irix which seems to require a blocking call on waitpid or select,
|
||||
# so I let read_nonblocking take care of this situation
|
||||
# (unfortunately, this requires waiting through the timeout).
|
||||
if pid == 0:
|
||||
return True
|
||||
|
||||
if os.WIFEXITED(status):
|
||||
self.status = status
|
||||
self.exitstatus = os.WEXITSTATUS(status)
|
||||
self.signalstatus = None
|
||||
self.terminated = True
|
||||
elif os.WIFSIGNALED(status):
|
||||
self.status = status
|
||||
self.exitstatus = None
|
||||
self.signalstatus = os.WTERMSIG(status)
|
||||
self.terminated = True
|
||||
elif os.WIFSTOPPED(status):
|
||||
raise PtyProcessError('isalive() encountered condition ' +
|
||||
'where child process is stopped. This is not ' +
|
||||
'supported. Is some other process attempting ' +
|
||||
'job control with our child pid?')
|
||||
return False
|
||||
|
||||
def kill(self, sig):
|
||||
"""Send the given signal to the child application.
|
||||
|
||||
In keeping with UNIX tradition it has a misleading name. It does not
|
||||
necessarily kill the child unless you send the right signal. See the
|
||||
:mod:`signal` module for constants representing signal numbers.
|
||||
"""
|
||||
|
||||
# Same as os.kill, but the pid is given for you.
|
||||
if self.isalive():
|
||||
os.kill(self.pid, sig)
|
||||
|
||||
def getwinsize(self):
|
||||
"""Return the window size of the pseudoterminal as a tuple (rows, cols).
|
||||
"""
|
||||
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
|
||||
s = struct.pack('HHHH', 0, 0, 0, 0)
|
||||
x = fcntl.ioctl(self.fd, TIOCGWINSZ, s)
|
||||
return struct.unpack('HHHH', x)[0:2]
|
||||
|
||||
def setwinsize(self, rows, cols):
|
||||
"""Set the terminal window size of the child tty.
|
||||
|
||||
This will cause a SIGWINCH signal to be sent to the child. This does not
|
||||
change the physical window size. It changes the size reported to
|
||||
TTY-aware applications like vi or curses -- applications that respond to
|
||||
the SIGWINCH signal.
|
||||
"""
|
||||
return _setwinsize(self.fd, rows, cols)
|
||||
|
||||
|
||||
class PtyProcessUnicode(PtyProcess):
|
||||
"""Unicode wrapper around a process running in a pseudoterminal.
|
||||
|
||||
This class exposes a similar interface to :class:`PtyProcess`, but its read
|
||||
methods return unicode, and its :meth:`write` accepts unicode.
|
||||
"""
|
||||
if PY3:
|
||||
string_type = str
|
||||
else:
|
||||
string_type = unicode # analysis:ignore
|
||||
|
||||
def __init__(self, pid, fd, encoding='utf-8', codec_errors='strict'):
|
||||
super(PtyProcessUnicode, self).__init__(pid, fd)
|
||||
self.encoding = encoding
|
||||
self.codec_errors = codec_errors
|
||||
self.decoder = codecs.getincrementaldecoder(encoding)(errors=codec_errors)
|
||||
|
||||
def read(self, size=1024):
|
||||
"""Read at most ``size`` bytes from the pty, return them as unicode.
|
||||
|
||||
Can block if there is nothing to read. Raises :exc:`EOFError` if the
|
||||
terminal was closed.
|
||||
|
||||
The size argument still refers to bytes, not unicode code points.
|
||||
"""
|
||||
b = super(PtyProcessUnicode, self).read(size)
|
||||
return self.decoder.decode(b, final=False)
|
||||
|
||||
def readline(self):
|
||||
"""Read one line from the pseudoterminal, and return it as unicode.
|
||||
|
||||
Can block if there is nothing to read. Raises :exc:`EOFError` if the
|
||||
terminal was closed.
|
||||
"""
|
||||
b = super(PtyProcessUnicode, self).readline()
|
||||
return self.decoder.decode(b, final=False)
|
||||
|
||||
def write(self, s):
|
||||
"""Write the unicode string ``s`` to the pseudoterminal.
|
||||
|
||||
Returns the number of bytes written.
|
||||
"""
|
||||
b = s.encode(self.encoding)
|
||||
return super(PtyProcessUnicode, self).write(b)
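
A brief usage sketch for the unicode wrapper, assuming the package is importable as ptyprocess and that the ls utility exists; names here are illustrative only:

    from ptyprocess import PtyProcessUnicode

    p = PtyProcessUnicode.spawn(['ls', '-l'], dimensions=(24, 80))
    p.setwinsize(40, 120)            # child receives SIGWINCH with the new size
    try:
        while True:
            print(p.read(), end='')  # str, decoded incrementally as utf-8
    except EOFError:
        pass                         # raised once the child closes the terminal
    p.wait()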
|
71
plugins/git_clone/pexpect/ptyprocess/util.py
Normal file
@ -0,0 +1,71 @@
|
||||
try:
|
||||
from shutil import which # Python >= 3.3
|
||||
except ImportError:
|
||||
import os, sys
|
||||
|
||||
# This is copied from Python 3.4.1
|
||||
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
|
||||
"""Given a command, mode, and a PATH string, return the path which
|
||||
conforms to the given mode on the PATH, or None if there is no such
|
||||
file.
|
||||
|
||||
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
|
||||
of os.environ.get("PATH"), or can be overridden with a custom search
|
||||
path.
|
||||
|
||||
"""
|
||||
# Check that a given file can be accessed with the correct mode.
|
||||
# Additionally check that `file` is not a directory, as on Windows
|
||||
# directories pass the os.access check.
|
||||
def _access_check(fn, mode):
|
||||
return (os.path.exists(fn) and os.access(fn, mode)
|
||||
and not os.path.isdir(fn))
|
||||
|
||||
# If we're given a path with a directory part, look it up directly rather
|
||||
# than referring to PATH directories. This includes checking relative to the
|
||||
# current directory, e.g. ./script
|
||||
if os.path.dirname(cmd):
|
||||
if _access_check(cmd, mode):
|
||||
return cmd
|
||||
return None
|
||||
|
||||
if path is None:
|
||||
path = os.environ.get("PATH", os.defpath)
|
||||
if not path:
|
||||
return None
|
||||
path = path.split(os.pathsep)
|
||||
|
||||
if sys.platform == "win32":
|
||||
# The current directory takes precedence on Windows.
|
||||
if not os.curdir in path:
|
||||
path.insert(0, os.curdir)
|
||||
|
||||
# PATHEXT is necessary to check on Windows.
|
||||
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
|
||||
# See if the given file matches any of the expected path extensions.
|
||||
# This will allow us to short circuit when given "python.exe".
|
||||
# If it does match, only test that one, otherwise we have to try
|
||||
# others.
|
||||
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
|
||||
files = [cmd]
|
||||
else:
|
||||
files = [cmd + ext for ext in pathext]
|
||||
else:
|
||||
# On other platforms you don't have things like PATHEXT to tell you
|
||||
# what file suffixes are executable, so just pass on cmd as-is.
|
||||
files = [cmd]
|
||||
|
||||
seen = set()
|
||||
for dir in path:
|
||||
normdir = os.path.normcase(dir)
|
||||
if not normdir in seen:
|
||||
seen.add(normdir)
|
||||
for thefile in files:
|
||||
name = os.path.join(dir, thefile)
|
||||
if _access_check(name, mode):
|
||||
return name
|
||||
return None
|
||||
|
||||
|
||||
class PtyProcessError(Exception):
|
||||
"""Generic error class for this package."""
|
537
plugins/git_clone/pexpect/pxssh.py
Normal file
@ -0,0 +1,537 @@
|
||||
'''This class extends pexpect.spawn to specialize setting up SSH connections.
|
||||
This adds methods for login, logout, and expecting the shell prompt.
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
from pexpect import ExceptionPexpect, TIMEOUT, EOF, spawn
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
__all__ = ['ExceptionPxssh', 'pxssh']
|
||||
|
||||
# Exception classes used by this module.
|
||||
class ExceptionPxssh(ExceptionPexpect):
|
||||
'''Raised for pxssh exceptions.
|
||||
'''
|
||||
|
||||
if sys.version_info > (3, 0):
|
||||
from shlex import quote
|
||||
else:
|
||||
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
|
||||
|
||||
def quote(s):
|
||||
"""Return a shell-escaped version of the string *s*."""
|
||||
if not s:
|
||||
return "''"
|
||||
if _find_unsafe(s) is None:
|
||||
return s
|
||||
|
||||
# use single quotes, and put single quotes into double quotes
|
||||
# the string $'b is then quoted as '$'"'"'b'
|
||||
return "'" + s.replace("'", "'\"'\"'") + "'"
|
||||
|
||||
class pxssh (spawn):
|
||||
'''This class extends pexpect.spawn to specialize setting up SSH
|
||||
connections. This adds methods for login, logout, and expecting the shell
|
||||
prompt. It does various tricky things to handle many situations in the SSH
|
||||
login process. For example, if the session is your first login, then pxssh
|
||||
automatically accepts the remote certificate; or if you have public key
|
||||
authentication setup then pxssh won't wait for the password prompt.
|
||||
|
||||
pxssh uses the shell prompt to synchronize output from the remote host. In
|
||||
order to make this more robust it sets the shell prompt to something more
|
||||
unique than just $ or #. This should work on most Bourne/Bash or Csh style
|
||||
shells.
|
||||
|
||||
Example that runs a few commands on a remote server and prints the result::
|
||||
|
||||
from pexpect import pxssh
|
||||
import getpass
|
||||
try:
|
||||
s = pxssh.pxssh()
|
||||
hostname = raw_input('hostname: ')
|
||||
username = raw_input('username: ')
|
||||
password = getpass.getpass('password: ')
|
||||
s.login(hostname, username, password)
|
||||
s.sendline('uptime') # run a command
|
||||
s.prompt() # match the prompt
|
||||
print(s.before) # print everything before the prompt.
|
||||
s.sendline('ls -l')
|
||||
s.prompt()
|
||||
print(s.before)
|
||||
s.sendline('df')
|
||||
s.prompt()
|
||||
print(s.before)
|
||||
s.logout()
|
||||
except pxssh.ExceptionPxssh as e:
|
||||
print("pxssh failed on login.")
|
||||
print(e)
|
||||
|
||||
Example showing how to specify SSH options::
|
||||
|
||||
from pexpect import pxssh
|
||||
s = pxssh.pxssh(options={
|
||||
"StrictHostKeyChecking": "no",
|
||||
"UserKnownHostsFile": "/dev/null"})
|
||||
...
|
||||
|
||||
Note that if you have ssh-agent running while doing development with pxssh
|
||||
then this can lead to a lot of confusion. Many X display managers (xdm,
|
||||
gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
|
||||
dialog box popup asking for a password during development. You should turn
|
||||
off any key agents during testing. The 'force_password' attribute will turn
|
||||
off public key authentication. This will only work if the remote SSH server
|
||||
is configured to allow password logins. Example of using 'force_password'
|
||||
attribute::
|
||||
|
||||
s = pxssh.pxssh()
|
||||
s.force_password = True
|
||||
hostname = raw_input('hostname: ')
|
||||
username = raw_input('username: ')
|
||||
password = getpass.getpass('password: ')
|
||||
s.login (hostname, username, password)
|
||||
|
||||
`debug_command_string` is only for the test suite to confirm that the string
|
||||
generated for SSH is correct, using this will not allow you to do
|
||||
anything other than get a string back from `pxssh.pxssh.login()`.
|
||||
'''
|
||||
|
||||
def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None,
|
||||
logfile=None, cwd=None, env=None, ignore_sighup=True, echo=True,
|
||||
options={}, encoding=None, codec_errors='strict',
|
||||
debug_command_string=False, use_poll=False):
|
||||
|
||||
spawn.__init__(self, None, timeout=timeout, maxread=maxread,
|
||||
searchwindowsize=searchwindowsize, logfile=logfile,
|
||||
cwd=cwd, env=env, ignore_sighup=ignore_sighup, echo=echo,
|
||||
encoding=encoding, codec_errors=codec_errors, use_poll=use_poll)
|
||||
|
||||
self.name = '<pxssh>'
|
||||
|
||||
#SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
|
||||
#slightly different string than the regular expression to match it. This
|
||||
#is because when you set the prompt the command will echo back, but we
|
||||
#don't want to match the echoed command. So if we make the set command
|
||||
#slightly different than the regex we eliminate the problem. To make the
|
||||
#set command different we add a backslash in front of $. The $ doesn't
|
||||
#need to be escaped, but it doesn't hurt and serves to make the set
|
||||
#prompt command different than the regex.
|
||||
|
||||
# used to match the command-line prompt
|
||||
self.UNIQUE_PROMPT = r"\[PEXPECT\][\$\#] "
|
||||
self.PROMPT = self.UNIQUE_PROMPT
|
||||
|
||||
# used to set shell command-line prompt to UNIQUE_PROMPT.
|
||||
self.PROMPT_SET_SH = r"PS1='[PEXPECT]\$ '"
|
||||
self.PROMPT_SET_CSH = r"set prompt='[PEXPECT]\$ '"
|
||||
self.SSH_OPTS = ("-o'RSAAuthentication=no'"
|
||||
+ " -o 'PubkeyAuthentication=no'")
|
||||
# Disabling host key checking makes you vulnerable to MITM attacks.
|
||||
# + " -o 'StrictHostKeyChecking=no'"
|
||||
# + " -o 'UserKnownHostsFile /dev/null' ")
|
||||
# Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
|
||||
# displaying a GUI password dialog. I have not figured out how to
|
||||
# disable only SSH_ASKPASS without also disabling X11 forwarding.
|
||||
# Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
|
||||
#self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
|
||||
self.force_password = False
|
||||
|
||||
self.debug_command_string = debug_command_string
|
||||
|
||||
# User defined SSH options, eg,
|
||||
# ssh.options = dict(StrictHostKeyChecking="no",UserKnownHostsFile="/dev/null")
|
||||
self.options = options
|
||||
|
||||
def levenshtein_distance(self, a, b):
|
||||
'''This calculates the Levenshtein distance between a and b.
|
||||
'''
|
||||
|
||||
n, m = len(a), len(b)
|
||||
if n > m:
|
||||
a,b = b,a
|
||||
n,m = m,n
|
||||
current = range(n+1)
|
||||
for i in range(1,m+1):
|
||||
previous, current = current, [i]+[0]*n
|
||||
for j in range(1,n+1):
|
||||
add, delete = previous[j]+1, current[j-1]+1
|
||||
change = previous[j-1]
|
||||
if a[j-1] != b[i-1]:
|
||||
change = change + 1
|
||||
current[j] = min(add, delete, change)
|
||||
return current[n]
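
As a quick, hedged sanity check of the edit-distance helper above (assuming the module is importable as pexpect.pxssh; the strings are arbitrary examples):

    from pexpect import pxssh   # assumed import path

    s = pxssh.pxssh()
    # 'kitten' -> 'sitting' takes three single-character edits:
    # substitute k->s, substitute e->i, append g.
    assert s.levenshtein_distance('kitten', 'sitting') == 3
    assert s.levenshtein_distance('', 'abc') == 3    # pure insertions
    assert s.levenshtein_distance('same', 'same') == 0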
|
||||
|
||||
def try_read_prompt(self, timeout_multiplier):
|
||||
'''This facilitates using communication timeouts to perform
|
||||
synchronization as quickly as possible, while supporting high latency
|
||||
connections with a tunable worst case performance. Fast connections
|
||||
should be read almost immediately. Worst case performance for this
|
||||
method is timeout_multiplier * 3 seconds.
|
||||
'''
|
||||
|
||||
# maximum time allowed to read the first response
|
||||
first_char_timeout = timeout_multiplier * 0.5
|
||||
|
||||
# maximum time allowed between subsequent characters
|
||||
inter_char_timeout = timeout_multiplier * 0.1
|
||||
|
||||
# maximum time for reading the entire prompt
|
||||
total_timeout = timeout_multiplier * 3.0
|
||||
|
||||
prompt = self.string_type()
|
||||
begin = time.time()
|
||||
expired = 0.0
|
||||
timeout = first_char_timeout
|
||||
|
||||
while expired < total_timeout:
|
||||
try:
|
||||
prompt += self.read_nonblocking(size=1, timeout=timeout)
|
||||
expired = time.time() - begin # updated total time expired
|
||||
timeout = inter_char_timeout
|
||||
except TIMEOUT:
|
||||
break
|
||||
|
||||
return prompt
|
||||
|
||||
def sync_original_prompt (self, sync_multiplier=1.0):
|
||||
'''This attempts to find the prompt. Basically, press enter and record
|
||||
the response; press enter again and record the response; if the two
|
||||
responses are similar then assume we are at the original prompt.
|
||||
This can be a slow function. Worst case with the default sync_multiplier
|
||||
can take 12 seconds. Low latency connections are more likely to fail
|
||||
with a low sync_multiplier. Best case sync time gets worse with a
|
||||
high sync multiplier (500 ms with default). '''
|
||||
|
||||
# All of these timing pace values are magic.
|
||||
# I came up with these based on what seemed reliable for
|
||||
# connecting to a heavily loaded machine I have.
|
||||
self.sendline()
|
||||
time.sleep(0.1)
|
||||
|
||||
try:
|
||||
# Clear the buffer before getting the prompt.
|
||||
self.try_read_prompt(sync_multiplier)
|
||||
except TIMEOUT:
|
||||
pass
|
||||
|
||||
self.sendline()
|
||||
x = self.try_read_prompt(sync_multiplier)
|
||||
|
||||
self.sendline()
|
||||
a = self.try_read_prompt(sync_multiplier)
|
||||
|
||||
self.sendline()
|
||||
b = self.try_read_prompt(sync_multiplier)
|
||||
|
||||
ld = self.levenshtein_distance(a,b)
|
||||
len_a = len(a)
|
||||
if len_a == 0:
|
||||
return False
|
||||
if float(ld)/len_a < 0.4:
|
||||
return True
|
||||
return False
|
||||
|
||||
### TODO: This is getting messy and I'm pretty sure this isn't perfect.
|
||||
### TODO: I need to draw a flow chart for this.
|
||||
### TODO: Unit tests for SSH tunnels, remote SSH command exec, disabling original prompt sync
|
||||
def login (self, server, username=None, password='', terminal_type='ansi',
|
||||
original_prompt=r"[#$]", login_timeout=10, port=None,
|
||||
auto_prompt_reset=True, ssh_key=None, quiet=True,
|
||||
sync_multiplier=1, check_local_ip=True,
|
||||
password_regex=r'(?i)(?:password:)|(?:passphrase for key)',
|
||||
ssh_tunnels={}, spawn_local_ssh=True,
|
||||
sync_original_prompt=True, ssh_config=None, cmd='ssh'):
|
||||
'''This logs the user into the given server.
|
||||
|
||||
It uses 'original_prompt' to try to find the prompt right after login.
|
||||
When it finds the prompt it immediately tries to reset the prompt to
|
||||
something more easily matched. The default 'original_prompt' is very
|
||||
optimistic and is easily fooled. It's more reliable to try to match the original
|
||||
prompt as exactly as possible to prevent false matches by server
|
||||
strings such as the "Message Of The Day". On many systems you can
|
||||
disable the MOTD on the remote server by creating a zero-length file
|
||||
called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
|
||||
then this will not necessarily cause the login to fail. In the case of
|
||||
a timeout when looking for the prompt we assume that the original
|
||||
prompt was so weird that we could not match it, so we use a few tricks
|
||||
to guess when we have reached the prompt. Then we hope for the best and
|
||||
blindly try to reset the prompt to something more unique. If that fails
|
||||
then login() raises an :class:`ExceptionPxssh` exception.
|
||||
|
||||
In some situations it is not possible or desirable to reset the
|
||||
original prompt. In this case, pass ``auto_prompt_reset=False`` to
|
||||
inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
|
||||
uses a unique prompt in the :meth:`prompt` method. If the original prompt is
|
||||
not reset then this will disable the :meth:`prompt` method unless you
|
||||
manually set the :attr:`PROMPT` attribute.
|
||||
|
||||
Set ``password_regex`` if there is a MOTD message with `password` in it.
|
||||
Changing this is like playing in traffic, don't (p)expect it to match straight
|
||||
away.
|
||||
|
||||
If you need to connect to another SSH server from your original SSH
|
||||
connection, set ``spawn_local_ssh`` to `False` and this will use your current
|
||||
session to do so. Setting this option to `False` and not having an active session
|
||||
will trigger an error.
|
||||
|
||||
Set ``ssh_key`` to a file path to an SSH private key to use that SSH key
|
||||
for the session authentication.
|
||||
Set ``ssh_key`` to `True` to force passing the current SSH authentication socket
|
||||
to the desired ``hostname``.
|
||||
|
||||
Set ``ssh_config`` to a file path string of an SSH client config file to pass that
|
||||
file to the client to handle itself. You may set any options you wish in here, however
|
||||
doing so will require you to post extra information that you may not want to if you
|
||||
run into issues.
|
||||
|
||||
Alter the ``cmd`` to change the ssh client used, or to prepend it with network
|
||||
namespaces. For example ```cmd="ip netns exec vlan2 ssh"``` to execute the ssh in
|
||||
network namespace named ```vlan2```.
|
||||
'''
|
||||
|
||||
session_regex_array = ["(?i)are you sure you want to continue connecting", original_prompt, password_regex, "(?i)permission denied", "(?i)terminal type", TIMEOUT]
|
||||
session_init_regex_array = []
|
||||
session_init_regex_array.extend(session_regex_array)
|
||||
session_init_regex_array.extend(["(?i)connection closed by remote host", EOF])
|
||||
|
||||
ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
|
||||
if quiet:
|
||||
ssh_options = ssh_options + ' -q'
|
||||
if not check_local_ip:
|
||||
ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
|
||||
if self.force_password:
|
||||
ssh_options = ssh_options + ' ' + self.SSH_OPTS
|
||||
if ssh_config is not None:
|
||||
if spawn_local_ssh and not os.path.isfile(ssh_config):
|
||||
raise ExceptionPxssh('SSH config does not exist or is not a file.')
|
||||
ssh_options = ssh_options + ' -F ' + ssh_config
|
||||
if port is not None:
|
||||
ssh_options = ssh_options + ' -p %s'%(str(port))
|
||||
if ssh_key is not None:
|
||||
# Allow forwarding our SSH key to the current session
|
||||
if ssh_key==True:
|
||||
ssh_options = ssh_options + ' -A'
|
||||
else:
|
||||
if spawn_local_ssh and not os.path.isfile(ssh_key):
|
||||
raise ExceptionPxssh('private ssh key does not exist or is not a file.')
|
||||
ssh_options = ssh_options + ' -i %s' % (ssh_key)
|
||||
|
||||
# SSH tunnels, make sure you know what you're putting into the lists
|
||||
# under each heading. Do not expect these to open 100% of the time,
|
||||
# The port you're requesting might be bound.
|
||||
#
|
||||
# The structure should be like this:
|
||||
# { 'local': ['2424:localhost:22'], # Local SSH tunnels
|
||||
# 'remote': ['2525:localhost:22'], # Remote SSH tunnels
|
||||
# 'dynamic': [8888] } # Dynamic/SOCKS tunnels
|
||||
if ssh_tunnels!={} and isinstance({},type(ssh_tunnels)):
|
||||
tunnel_types = {
|
||||
'local':'L',
|
||||
'remote':'R',
|
||||
'dynamic':'D'
|
||||
}
|
||||
for tunnel_type in tunnel_types:
|
||||
cmd_type = tunnel_types[tunnel_type]
|
||||
if tunnel_type in ssh_tunnels:
|
||||
tunnels = ssh_tunnels[tunnel_type]
|
||||
for tunnel in tunnels:
|
||||
if spawn_local_ssh==False:
|
||||
tunnel = quote(str(tunnel))
|
||||
ssh_options = ssh_options + ' -' + cmd_type + ' ' + str(tunnel)
|
||||
|
||||
if username is not None:
|
||||
ssh_options = ssh_options + ' -l ' + username
|
||||
elif ssh_config is None:
|
||||
raise TypeError('login() needs either a username or an ssh_config')
|
||||
else: # make sure ssh_config has an entry for the server with a username
|
||||
with open(ssh_config, 'rt') as f:
|
||||
lines = [l.strip() for l in f.readlines()]
|
||||
|
||||
server_regex = r'^Host\s+%s\s*$' % server
|
||||
user_regex = r'^User\s+\w+\s*$'
|
||||
config_has_server = False
|
||||
server_has_username = False
|
||||
for line in lines:
|
||||
if not config_has_server and re.match(server_regex, line, re.IGNORECASE):
|
||||
config_has_server = True
|
||||
elif config_has_server and 'hostname' in line.lower():
|
||||
pass
|
||||
elif config_has_server and 'host' in line.lower():
|
||||
server_has_username = False # insurance
|
||||
break # we have left the relevant section
|
||||
elif config_has_server and re.match(user_regex, line, re.IGNORECASE):
|
||||
server_has_username = True
|
||||
break
|
||||
|
||||
if lines:
|
||||
del line
|
||||
|
||||
del lines
|
||||
|
||||
if not config_has_server:
|
||||
raise TypeError('login() ssh_config has no Host entry for %s' % server)
|
||||
elif not server_has_username:
|
||||
raise TypeError('login() ssh_config has no user entry for %s' % server)
|
||||
|
||||
cmd += " %s %s" % (ssh_options, server)
|
||||
if self.debug_command_string:
|
||||
return(cmd)
|
||||
|
||||
# Are we asking for a local ssh command or to spawn one in another session?
|
||||
if spawn_local_ssh:
|
||||
spawn._spawn(self, cmd)
|
||||
else:
|
||||
self.sendline(cmd)
|
||||
|
||||
# This does not distinguish between a remote server 'password' prompt
|
||||
# and a local ssh 'passphrase' prompt (for unlocking a private key).
|
||||
i = self.expect(session_init_regex_array, timeout=login_timeout)
|
||||
|
||||
# First phase
|
||||
if i==0:
|
||||
# New certificate -- always accept it.
|
||||
# This is what you get if SSH does not have the remote host's
|
||||
# public key stored in the 'known_hosts' cache.
|
||||
self.sendline("yes")
|
||||
i = self.expect(session_regex_array)
|
||||
if i==2: # password or passphrase
|
||||
self.sendline(password)
|
||||
i = self.expect(session_regex_array)
|
||||
if i==4:
|
||||
self.sendline(terminal_type)
|
||||
i = self.expect(session_regex_array)
|
||||
if i==7:
|
||||
self.close()
|
||||
raise ExceptionPxssh('Could not establish connection to host')
|
||||
|
||||
# Second phase
|
||||
if i==0:
|
||||
# This is weird. This should not happen twice in a row.
|
||||
self.close()
|
||||
raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
|
||||
elif i==1: # can occur if you have a public key pair set to authenticate.
|
||||
### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
|
||||
pass
|
||||
elif i==2: # password prompt again
|
||||
# For incorrect passwords, some ssh servers will
|
||||
# ask for the password again, others return 'denied' right away.
|
||||
# If we get the password prompt again then this means
|
||||
# we didn't get the password right the first time.
|
||||
self.close()
|
||||
raise ExceptionPxssh('password refused')
|
||||
elif i==3: # permission denied -- password was bad.
|
||||
self.close()
|
||||
raise ExceptionPxssh('permission denied')
|
||||
elif i==4: # terminal type again? WTF?
|
||||
self.close()
|
||||
raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
|
||||
elif i==5: # Timeout
|
||||
#This is tricky... I presume that we are at the command-line prompt.
|
||||
#It may be that the shell prompt was so weird that we couldn't match
|
||||
#it. Or it may be that we couldn't log in for some other reason. I
|
||||
#can't be sure, but it's safe to guess that we did login because if
|
||||
#I presume wrong and we are not logged in then this should be caught
|
||||
#later when I try to set the shell prompt.
|
||||
pass
|
||||
elif i==6: # Connection closed by remote host
|
||||
self.close()
|
||||
raise ExceptionPxssh('connection closed')
|
||||
else: # Unexpected
|
||||
self.close()
|
||||
raise ExceptionPxssh('unexpected login response')
|
||||
if sync_original_prompt:
|
||||
if not self.sync_original_prompt(sync_multiplier):
|
||||
self.close()
|
||||
raise ExceptionPxssh('could not synchronize with original prompt')
|
||||
# We appear to be in.
|
||||
# set shell prompt to something unique.
|
||||
if auto_prompt_reset:
|
||||
if not self.set_unique_prompt():
|
||||
self.close()
|
||||
raise ExceptionPxssh('could not set shell prompt '
|
||||
'(received: %r, expected: %r).' % (
|
||||
self.before, self.PROMPT,))
|
||||
return True
|
||||
|
||||
def logout (self):
|
||||
'''Sends exit to the remote shell.
|
||||
|
||||
If there are stopped jobs then this automatically sends exit twice.
|
||||
'''
|
||||
self.sendline("exit")
|
||||
index = self.expect([EOF, "(?i)there are stopped jobs"])
|
||||
if index==1:
|
||||
self.sendline("exit")
|
||||
self.expect(EOF)
|
||||
self.close()
|
||||
|
||||
def prompt(self, timeout=-1):
|
||||
'''Match the next shell prompt.
|
||||
|
||||
This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
|
||||
method. Note that if you called :meth:`login` with
|
||||
``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
|
||||
set the :attr:`PROMPT` attribute to a regex that it will use for
|
||||
matching the prompt.
|
||||
|
||||
Calling :meth:`prompt` will erase the contents of the :attr:`before`
|
||||
attribute even if no prompt is ever matched. If timeout is not given or
|
||||
it is set to -1 then self.timeout is used.
|
||||
|
||||
:return: True if the shell prompt was matched, False if the timeout was
|
||||
reached.
|
||||
'''
|
||||
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
|
||||
if i==1:
|
||||
return False
|
||||
return True
|
||||
|
||||
def set_unique_prompt(self):
|
||||
'''This sets the remote prompt to something more unique than ``#`` or ``$``.
|
||||
This makes it easier for the :meth:`prompt` method to match the shell prompt
|
||||
unambiguously. This method is called automatically by the :meth:`login`
|
||||
method, but you may want to call it manually if you somehow reset the
|
||||
shell prompt. For example, if you 'su' to a different user then you
|
||||
will need to manually reset the prompt. This sends shell commands to
|
||||
the remote host to set the prompt, so this assumes the remote host is
|
||||
ready to receive commands.
|
||||
|
||||
Alternatively, you may use your own prompt pattern. In this case you
|
||||
should call :meth:`login` with ``auto_prompt_reset=False``; then set the
|
||||
:attr:`PROMPT` attribute to a regular expression. After that, the
|
||||
:meth:`prompt` method will try to match your prompt pattern.
|
||||
'''
|
||||
|
||||
self.sendline("unset PROMPT_COMMAND")
|
||||
self.sendline(self.PROMPT_SET_SH) # sh-style
|
||||
i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
|
||||
if i == 0: # csh-style
|
||||
self.sendline(self.PROMPT_SET_CSH)
|
||||
i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
|
||||
if i == 0:
|
||||
return False
|
||||
return True
|
||||
|
||||
# vi:ts=4:sw=4:expandtab:ft=python:
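
A hedged sketch of the "su to another user" case mentioned in set_unique_prompt's docstring; the host name, user names and passwords below are placeholders, not real values:

    from pexpect import pxssh

    s = pxssh.pxssh()
    s.login('example.com', 'me', 'my-password')
    s.sendline('su - otheruser')
    s.expect('(?i)password')
    s.sendline('other-password')
    # The new shell has its own prompt, so re-establish the unique prompt
    # before relying on s.prompt() again.
    s.set_unique_prompt()
    s.sendline('whoami')
    s.prompt()
    print(s.before)
    s.logout()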
|
130
plugins/git_clone/pexpect/replwrap.py
Normal file
@ -0,0 +1,130 @@
|
||||
"""Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
|
||||
"""
|
||||
import os.path
|
||||
import signal
|
||||
import sys
|
||||
|
||||
import pexpect
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
|
||||
if PY3:
|
||||
basestring = str
|
||||
|
||||
PEXPECT_PROMPT = u'[PEXPECT_PROMPT>'
|
||||
PEXPECT_CONTINUATION_PROMPT = u'[PEXPECT_PROMPT+'
|
||||
|
||||
class REPLWrapper(object):
|
||||
"""Wrapper for a REPL.
|
||||
|
||||
:param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
|
||||
in which a REPL has already been started, or a str command to start a new
|
||||
REPL process.
|
||||
:param str orig_prompt: The prompt to expect at first.
|
||||
:param str prompt_change: A command to change the prompt to something more
|
||||
unique. If this is ``None``, the prompt will not be changed. This will
|
||||
be formatted with the new and continuation prompts as positional
|
||||
parameters, so you can use ``{}`` style formatting to insert them into
|
||||
the command.
|
||||
:param str new_prompt: The more unique prompt to expect after the change.
|
||||
:param str extra_init_cmd: Commands to do extra initialisation, such as
|
||||
disabling pagers.
|
||||
"""
|
||||
def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
|
||||
new_prompt=PEXPECT_PROMPT,
|
||||
continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
|
||||
extra_init_cmd=None):
|
||||
if isinstance(cmd_or_spawn, basestring):
|
||||
self.child = pexpect.spawn(cmd_or_spawn, echo=False, encoding='utf-8')
|
||||
else:
|
||||
self.child = cmd_or_spawn
|
||||
if self.child.echo:
|
||||
# Existing spawn instance has echo enabled, disable it
|
||||
# to prevent our input from being repeated to output.
|
||||
self.child.setecho(False)
|
||||
self.child.waitnoecho()
|
||||
|
||||
if prompt_change is None:
|
||||
self.prompt = orig_prompt
|
||||
else:
|
||||
self.set_prompt(orig_prompt,
|
||||
prompt_change.format(new_prompt, continuation_prompt))
|
||||
self.prompt = new_prompt
|
||||
self.continuation_prompt = continuation_prompt
|
||||
|
||||
self._expect_prompt()
|
||||
|
||||
if extra_init_cmd is not None:
|
||||
self.run_command(extra_init_cmd)
|
||||
|
||||
def set_prompt(self, orig_prompt, prompt_change):
|
||||
self.child.expect(orig_prompt)
|
||||
self.child.sendline(prompt_change)
|
||||
|
||||
def _expect_prompt(self, timeout=-1, async_=False):
|
||||
return self.child.expect_exact([self.prompt, self.continuation_prompt],
|
||||
timeout=timeout, async_=async_)
|
||||
|
||||
def run_command(self, command, timeout=-1, async_=False):
|
||||
"""Send a command to the REPL, wait for and return output.
|
||||
|
||||
:param str command: The command to send. Trailing newlines are not needed.
|
||||
This should be a complete block of input that will trigger execution;
|
||||
if a continuation prompt is found after sending input, :exc:`ValueError`
|
||||
will be raised.
|
||||
:param int timeout: How long to wait for the next prompt. -1 means the
|
||||
default from the :class:`pexpect.spawn` object (default 30 seconds).
|
||||
None means to wait indefinitely.
|
||||
:param bool async_: On Python 3.4, or Python 3.3 with asyncio
|
||||
installed, passing ``async_=True`` will make this return an
|
||||
:mod:`asyncio` Future, which you can yield from to get the same
|
||||
result that this method would normally give directly.
|
||||
"""
|
||||
# Split up multiline commands and feed them in bit-by-bit
|
||||
cmdlines = command.splitlines()
|
||||
# splitlines ignores trailing newlines - add it back in manually
|
||||
if command.endswith('\n'):
|
||||
cmdlines.append('')
|
||||
if not cmdlines:
|
||||
raise ValueError("No command was given")
|
||||
|
||||
if async_:
|
||||
from ._async import repl_run_command_async
|
||||
return repl_run_command_async(self, cmdlines, timeout)
|
||||
|
||||
res = []
|
||||
self.child.sendline(cmdlines[0])
|
||||
for line in cmdlines[1:]:
|
||||
self._expect_prompt(timeout=timeout)
|
||||
res.append(self.child.before)
|
||||
self.child.sendline(line)
|
||||
|
||||
# Command was fully submitted, now wait for the next prompt
|
||||
if self._expect_prompt(timeout=timeout) == 1:
|
||||
# We got the continuation prompt - command was incomplete
|
||||
self.child.kill(signal.SIGINT)
|
||||
self._expect_prompt(timeout=1)
|
||||
raise ValueError("Continuation prompt found - input was incomplete:\n"
|
||||
+ command)
|
||||
return u''.join(res + [self.child.before])
|
||||
|
||||
def python(command="python"):
|
||||
"""Start a Python shell and return a :class:`REPLWrapper` object."""
|
||||
return REPLWrapper(command, u">>> ", u"import sys; sys.ps1={0!r}; sys.ps2={1!r}")
|
||||
|
||||
def bash(command="bash"):
|
||||
"""Start a bash shell and return a :class:`REPLWrapper` object."""
|
||||
bashrc = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
|
||||
child = pexpect.spawn(command, ['--rcfile', bashrc], echo=False,
|
||||
encoding='utf-8')
|
||||
|
||||
# If the user runs 'env', the value of PS1 will be in the output. To avoid
|
||||
# replwrap seeing that as the next prompt, we'll embed the marker characters
|
||||
# for invisible characters in the prompt; these show up when inspecting the
|
||||
# environment variable, but not when bash displays the prompt.
|
||||
ps1 = PEXPECT_PROMPT[:5] + u'\\[\\]' + PEXPECT_PROMPT[5:]
|
||||
ps2 = PEXPECT_CONTINUATION_PROMPT[:5] + u'\\[\\]' + PEXPECT_CONTINUATION_PROMPT[5:]
|
||||
prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(ps1, ps2)
|
||||
|
||||
return REPLWrapper(child, u'\\$', prompt_change,
|
||||
extra_init_cmd="export PAGER=cat")
|
157
plugins/git_clone/pexpect/run.py
Normal file
@ -0,0 +1,157 @@
|
||||
import sys
|
||||
import types
|
||||
|
||||
from .exceptions import EOF, TIMEOUT
|
||||
from .pty_spawn import spawn
|
||||
|
||||
def run(command, timeout=30, withexitstatus=False, events=None,
|
||||
extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
|
||||
|
||||
'''
|
||||
This function runs the given command; waits for it to finish; then
|
||||
returns all output as a string. STDERR is included in output. If the full
|
||||
path to the command is not given then the path is searched.
|
||||
|
||||
Note that lines are terminated by CR/LF (\\r\\n) combination even on
|
||||
UNIX-like systems because this is the standard for pseudottys. If you set
|
||||
'withexitstatus' to true, then run will return a tuple of (command_output,
|
||||
exitstatus). If 'withexitstatus' is false then this returns just
|
||||
command_output.
|
||||
|
||||
The run() function can often be used instead of creating a spawn instance.
|
||||
For example, the following code uses spawn::
|
||||
|
||||
from pexpect import *
|
||||
child = spawn('scp foo user@example.com:.')
|
||||
child.expect('(?i)password')
|
||||
child.sendline(mypassword)
|
||||
|
||||
The previous code can be replaced with the following::
|
||||
|
||||
from pexpect import *
|
||||
run('scp foo user@example.com:.', events={'(?i)password': mypassword})
|
||||
|
||||
**Examples**
|
||||
|
||||
Start the apache daemon on the local machine::
|
||||
|
||||
from pexpect import *
|
||||
run("/usr/local/apache/bin/apachectl start")
|
||||
|
||||
Check in a file using SVN::
|
||||
|
||||
from pexpect import *
|
||||
run("svn ci -m 'automatic commit' my_file.py")
|
||||
|
||||
Run a command and capture exit status::
|
||||
|
||||
from pexpect import *
|
||||
(command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
|
||||
|
||||
The following will run SSH and execute 'ls -l' on the remote machine. The
|
||||
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
|
||||
|
||||
run("ssh username@machine.example.com 'ls -l'",
|
||||
events={'(?i)password':'secret\\n'})
|
||||
|
||||
This will start mencoder to rip a video from DVD. This will also display
|
||||
progress ticks every 5 seconds as it runs. For example::
|
||||
|
||||
from pexpect import *
|
||||
def print_ticks(d):
|
||||
print(d['event_count'])
|
||||
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
|
||||
events={TIMEOUT:print_ticks}, timeout=5)
|
||||
|
||||
The 'events' argument should be either a dictionary or a tuple list that
|
||||
contains patterns and responses. Whenever one of the patterns is seen
|
||||
in the command output, run() will send the associated response string.
|
||||
So, run() in the above example can be also written as:
|
||||
|
||||
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
|
||||
events=[(TIMEOUT,print_ticks)], timeout=5)
|
||||
|
||||
Use a tuple list for events if the command output requires a delicate
|
||||
control over what pattern should be matched, since the tuple list is passed
|
||||
to expect() as its pattern list, with the order of patterns preserved.
|
||||
|
||||
Note that you should put newlines in your string if Enter is necessary.
|
||||
|
||||
Like the example above, the responses may also contain a callback, either
|
||||
a function or method. It should accept a dictionary value as an argument.
|
||||
The dictionary contains all the locals from the run() function, so you can
|
||||
access the child spawn object or any other variable defined in run()
|
||||
(event_count, child, and extra_args are the most useful). A callback may
|
||||
return True to stop the current run process. Otherwise run() continues
|
||||
until the next event. A callback may also return a string which will be
|
||||
sent to the child. 'extra_args' is not used directly by run(). It provides
|
||||
a way to pass data to a callback function via run(), through the locals
|
||||
dictionary passed to a callback.
|
||||
|
||||
Like :class:`spawn`, passing *encoding* will make it work with unicode
|
||||
instead of bytes. You can pass *codec_errors* to control how errors in
|
||||
encoding and decoding are handled.
|
||||
'''
|
||||
if timeout == -1:
|
||||
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
|
||||
**kwargs)
|
||||
else:
|
||||
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
|
||||
cwd=cwd, env=env, **kwargs)
|
||||
if isinstance(events, list):
|
||||
patterns= [x for x,y in events]
|
||||
responses = [y for x,y in events]
|
||||
elif isinstance(events, dict):
|
||||
patterns = list(events.keys())
|
||||
responses = list(events.values())
|
||||
else:
|
||||
# This assumes EOF or TIMEOUT will eventually cause run to terminate.
|
||||
patterns = None
|
||||
responses = None
|
||||
child_result_list = []
|
||||
event_count = 0
|
||||
while True:
|
||||
try:
|
||||
index = child.expect(patterns)
|
||||
if isinstance(child.after, child.allowed_string_types):
|
||||
child_result_list.append(child.before + child.after)
|
||||
else:
|
||||
# child.after may have been a TIMEOUT or EOF,
|
||||
# which we don't want appended to the list.
|
||||
child_result_list.append(child.before)
|
||||
if isinstance(responses[index], child.allowed_string_types):
|
||||
child.send(responses[index])
|
||||
elif (isinstance(responses[index], types.FunctionType) or
|
||||
isinstance(responses[index], types.MethodType)):
|
||||
callback_result = responses[index](locals())
|
||||
sys.stdout.flush()
|
||||
if isinstance(callback_result, child.allowed_string_types):
|
||||
child.send(callback_result)
|
||||
elif callback_result:
|
||||
break
|
||||
else:
|
||||
raise TypeError("parameter `event' at index {index} must be "
|
||||
"a string, method, or function: {value!r}"
|
||||
.format(index=index, value=responses[index]))
|
||||
event_count = event_count + 1
|
||||
except TIMEOUT:
|
||||
child_result_list.append(child.before)
|
||||
break
|
||||
except EOF:
|
||||
child_result_list.append(child.before)
|
||||
break
|
||||
child_result = child.string_type().join(child_result_list)
|
||||
if withexitstatus:
|
||||
child.close()
|
||||
return (child_result, child.exitstatus)
|
||||
else:
|
||||
return child_result
|
||||
|
||||
def runu(command, timeout=30, withexitstatus=False, events=None,
|
||||
extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
|
||||
"""Deprecated: pass encoding to run() instead.
|
||||
"""
|
||||
kwargs.setdefault('encoding', 'utf-8')
|
||||
return run(command, timeout=timeout, withexitstatus=withexitstatus,
|
||||
events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
|
||||
env=env, **kwargs)
|
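For orientation, a short sketch of the run() API documented above; the host, user, and password are placeholders, not values from this repository.

    import pexpect

    # Capture output and exit status in one call.
    output, status = pexpect.run('ls -l /bin', withexitstatus=True)

    # Answer an interactive prompt by mapping a pattern to a response string.
    pexpect.run("ssh user@example.com 'ls -l'",
                events={'(?i)password': 'secret\n'})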
431
plugins/git_clone/pexpect/screen.py
Normal file
@ -0,0 +1,431 @@
|
||||
'''This implements a virtual screen. This is used to support ANSI terminal
|
||||
emulation. The screen representation and state is implemented in this class.
|
||||
Most of the methods are inspired by ANSI screen control codes. The
|
||||
:class:`~pexpect.ANSI.ANSI` class extends this class to add parsing of ANSI
|
||||
escape codes.
|
||||
|
||||
PEXPECT LICENSE
|
||||
|
||||
This license is approved by the OSI and FSF as GPL-compatible.
|
||||
http://opensource.org/licenses/isc-license.txt
|
||||
|
||||
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
|
||||
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
|
||||
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
|
||||
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
'''
|
||||
|
||||
import codecs
|
||||
import copy
|
||||
import sys
|
||||
|
||||
import warnings
|
||||
|
||||
warnings.warn(("pexpect.screen and pexpect.ANSI are deprecated. "
|
||||
"We recommend using pyte to emulate a terminal screen: "
|
||||
"https://pypi.python.org/pypi/pyte"),
|
||||
stacklevel=2)
|
||||
|
||||
NUL = 0 # Fill character; ignored on input.
|
||||
ENQ = 5 # Transmit answerback message.
|
||||
BEL = 7 # Ring the bell.
|
||||
BS = 8 # Move cursor left.
|
||||
HT = 9 # Move cursor to next tab stop.
|
||||
LF = 10 # Line feed.
|
||||
VT = 11 # Same as LF.
|
||||
FF = 12 # Same as LF.
|
||||
CR = 13 # Move cursor to left margin or newline.
|
||||
SO = 14 # Invoke G1 character set.
|
||||
SI = 15 # Invoke G0 character set.
|
||||
XON = 17 # Resume transmission.
|
||||
XOFF = 19 # Halt transmission.
|
||||
CAN = 24 # Cancel escape sequence.
|
||||
SUB = 26 # Same as CAN.
|
||||
ESC = 27 # Introduce a control sequence.
|
||||
DEL = 127 # Fill character; ignored on input.
|
||||
SPACE = u' ' # Space or blank character.
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
if PY3:
|
||||
unicode = str
|
||||
|
||||
def constrain (n, min, max):
|
||||
|
||||
'''This returns a number, n constrained to the min and max bounds. '''
|
||||
|
||||
if n < min:
|
||||
return min
|
||||
if n > max:
|
||||
return max
|
||||
return n
|
||||
|
||||
class screen:
|
||||
'''This object maintains the state of a virtual text screen as a
|
||||
rectangular array. This maintains a virtual cursor position and handles
|
||||
scrolling as characters are added. This supports most of the methods needed
|
||||
by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
|
||||
like arrays).
|
||||
|
||||
Characters are represented internally using unicode. Methods that accept
|
||||
input characters, when passed 'bytes' (which in Python 2 is equivalent to
|
||||
'str'), convert them from the encoding specified in the 'encoding'
|
||||
parameter to the constructor. Methods that return screen contents return
|
||||
unicode strings, with the exception of __str__() under Python 2. Passing
|
||||
``encoding=None`` limits the API to only accept unicode input, so passing
|
||||
bytes in will raise :exc:`TypeError`.
|
||||
'''
|
||||
def __init__(self, r=24, c=80, encoding='latin-1', encoding_errors='replace'):
|
||||
'''This initializes a blank screen of the given dimensions.'''
|
||||
|
||||
self.rows = r
|
||||
self.cols = c
|
||||
self.encoding = encoding
|
||||
self.encoding_errors = encoding_errors
|
||||
if encoding is not None:
|
||||
self.decoder = codecs.getincrementaldecoder(encoding)(encoding_errors)
|
||||
else:
|
||||
self.decoder = None
|
||||
self.cur_r = 1
|
||||
self.cur_c = 1
|
||||
self.cur_saved_r = 1
|
||||
self.cur_saved_c = 1
|
||||
self.scroll_row_start = 1
|
||||
self.scroll_row_end = self.rows
|
||||
self.w = [ [SPACE] * self.cols for _ in range(self.rows)]
|
||||
|
||||
def _decode(self, s):
|
||||
'''This converts from the external coding system (as passed to
|
||||
the constructor) to the internal one (unicode). '''
|
||||
if self.decoder is not None:
|
||||
return self.decoder.decode(s)
|
||||
else:
|
||||
raise TypeError("This screen was constructed with encoding=None, "
|
||||
"so it does not handle bytes.")
|
||||
|
||||
def _unicode(self):
|
||||
'''This returns a printable representation of the screen as a unicode
|
||||
string (which, under Python 3.x, is the same as 'str'). The end of each
|
||||
screen line is terminated by a newline.'''
|
||||
|
||||
return u'\n'.join ([ u''.join(c) for c in self.w ])
|
||||
|
||||
if PY3:
|
||||
__str__ = _unicode
|
||||
else:
|
||||
__unicode__ = _unicode
|
||||
|
||||
def __str__(self):
|
||||
'''This returns a printable representation of the screen. The end of
|
||||
each screen line is terminated by a newline. '''
|
||||
encoding = self.encoding or 'ascii'
|
||||
return self._unicode().encode(encoding, 'replace')
|
||||
|
||||
def dump (self):
|
||||
'''This returns a copy of the screen as a unicode string. This is similar to
|
||||
__str__/__unicode__ except that lines are not terminated with line
|
||||
feeds.'''
|
||||
|
||||
return u''.join ([ u''.join(c) for c in self.w ])
|
||||
|
||||
def pretty (self):
|
||||
'''This returns a copy of the screen as a unicode string with an ASCII
|
||||
text box around the screen border. This is similar to
|
||||
__str__/__unicode__ except that it adds a box.'''
|
||||
|
||||
top_bot = u'+' + u'-'*self.cols + u'+\n'
|
||||
return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot
|
||||
|
||||
def fill (self, ch=SPACE):
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
self.fill_region (1,1,self.rows,self.cols, ch)
|
||||
|
||||
def fill_region (self, rs,cs, re,ce, ch=SPACE):
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
rs = constrain (rs, 1, self.rows)
|
||||
re = constrain (re, 1, self.rows)
|
||||
cs = constrain (cs, 1, self.cols)
|
||||
ce = constrain (ce, 1, self.cols)
|
||||
if rs > re:
|
||||
rs, re = re, rs
|
||||
if cs > ce:
|
||||
cs, ce = ce, cs
|
||||
for r in range (rs, re+1):
|
||||
for c in range (cs, ce + 1):
|
||||
self.put_abs (r,c,ch)
|
||||
|
||||
def cr (self):
|
||||
'''This moves the cursor to the beginning (col 1) of the current row.
|
||||
'''
|
||||
|
||||
self.cursor_home (self.cur_r, 1)
|
||||
|
||||
def lf (self):
|
||||
'''This moves the cursor down with scrolling.
|
||||
'''
|
||||
|
||||
old_r = self.cur_r
|
||||
self.cursor_down()
|
||||
if old_r == self.cur_r:
|
||||
self.scroll_up ()
|
||||
self.erase_line()
|
||||
|
||||
def crlf (self):
|
||||
'''This advances the cursor with CRLF properties.
|
||||
The cursor will line wrap and the screen may scroll.
|
||||
'''
|
||||
|
||||
self.cr ()
|
||||
self.lf ()
|
||||
|
||||
def newline (self):
|
||||
'''This is an alias for crlf().
|
||||
'''
|
||||
|
||||
self.crlf()
|
||||
|
||||
def put_abs (self, r, c, ch):
|
||||
'''Screen array starts at 1 index.'''
|
||||
|
||||
r = constrain (r, 1, self.rows)
|
||||
c = constrain (c, 1, self.cols)
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)[0]
|
||||
else:
|
||||
ch = ch[0]
|
||||
self.w[r-1][c-1] = ch
|
||||
|
||||
def put (self, ch):
|
||||
'''This puts a character at the current cursor position.
|
||||
'''
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
self.put_abs (self.cur_r, self.cur_c, ch)
|
||||
|
||||
def insert_abs (self, r, c, ch):
|
||||
'''This inserts a character at (r,c). Everything under
|
||||
and to the right is shifted right one character.
|
||||
The last character of the line is lost.
|
||||
'''
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
r = constrain (r, 1, self.rows)
|
||||
c = constrain (c, 1, self.cols)
|
||||
for ci in range (self.cols, c, -1):
|
||||
self.put_abs (r,ci, self.get_abs(r,ci-1))
|
||||
self.put_abs (r,c,ch)
|
||||
|
||||
def insert (self, ch):
|
||||
|
||||
if isinstance(ch, bytes):
|
||||
ch = self._decode(ch)
|
||||
|
||||
self.insert_abs (self.cur_r, self.cur_c, ch)
|
||||
|
||||
def get_abs (self, r, c):
|
||||
|
||||
r = constrain (r, 1, self.rows)
|
||||
c = constrain (c, 1, self.cols)
|
||||
return self.w[r-1][c-1]
|
||||
|
||||
def get (self):
|
||||
|
||||
return self.get_abs (self.cur_r, self.cur_c)
|
||||
|
||||
def get_region (self, rs,cs, re,ce):
|
||||
'''This returns a list of lines representing the region.
|
||||
'''
|
||||
|
||||
rs = constrain (rs, 1, self.rows)
|
||||
re = constrain (re, 1, self.rows)
|
||||
cs = constrain (cs, 1, self.cols)
|
||||
ce = constrain (ce, 1, self.cols)
|
||||
if rs > re:
|
||||
rs, re = re, rs
|
||||
if cs > ce:
|
||||
cs, ce = ce, cs
|
||||
sc = []
|
||||
for r in range (rs, re+1):
|
||||
line = u''
|
||||
for c in range (cs, ce + 1):
|
||||
ch = self.get_abs (r,c)
|
||||
line = line + ch
|
||||
sc.append (line)
|
||||
return sc
|
||||
|
||||
def cursor_constrain (self):
|
||||
'''This keeps the cursor within the screen area.
|
||||
'''
|
||||
|
||||
self.cur_r = constrain (self.cur_r, 1, self.rows)
|
||||
self.cur_c = constrain (self.cur_c, 1, self.cols)
|
||||
|
||||
def cursor_home (self, r=1, c=1): # <ESC>[{ROW};{COLUMN}H
|
||||
|
||||
self.cur_r = r
|
||||
self.cur_c = c
|
||||
self.cursor_constrain ()
|
||||
|
||||
def cursor_back (self,count=1): # <ESC>[{COUNT}D (not confused with down)
|
||||
|
||||
self.cur_c = self.cur_c - count
|
||||
self.cursor_constrain ()
|
||||
|
||||
def cursor_down (self,count=1): # <ESC>[{COUNT}B (not confused with back)
|
||||
|
||||
self.cur_r = self.cur_r + count
|
||||
self.cursor_constrain ()
|
||||
|
||||
def cursor_forward (self,count=1): # <ESC>[{COUNT}C
|
||||
|
||||
self.cur_c = self.cur_c + count
|
||||
self.cursor_constrain ()
|
||||
|
||||
def cursor_up (self,count=1): # <ESC>[{COUNT}A
|
||||
|
||||
self.cur_r = self.cur_r - count
|
||||
self.cursor_constrain ()
|
||||
|
||||
def cursor_up_reverse (self): # <ESC> M (called RI -- Reverse Index)
|
||||
|
||||
old_r = self.cur_r
|
||||
self.cursor_up()
|
||||
if old_r == self.cur_r:
|
||||
self.scroll_up()
|
||||
|
||||
def cursor_force_position (self, r, c): # <ESC>[{ROW};{COLUMN}f
|
||||
'''Identical to Cursor Home.'''
|
||||
|
||||
self.cursor_home (r, c)
|
||||
|
||||
def cursor_save (self): # <ESC>[s
|
||||
'''Save current cursor position.'''
|
||||
|
||||
self.cursor_save_attrs()
|
||||
|
||||
def cursor_unsave (self): # <ESC>[u
|
||||
'''Restores cursor position after a Save Cursor.'''
|
||||
|
||||
self.cursor_restore_attrs()
|
||||
|
||||
def cursor_save_attrs (self): # <ESC>7
|
||||
'''Save current cursor position.'''
|
||||
|
||||
self.cur_saved_r = self.cur_r
|
||||
self.cur_saved_c = self.cur_c
|
||||
|
||||
def cursor_restore_attrs (self): # <ESC>8
|
||||
'''Restores cursor position after a Save Cursor.'''
|
||||
|
||||
self.cursor_home (self.cur_saved_r, self.cur_saved_c)
|
||||
|
||||
def scroll_constrain (self):
|
||||
'''This keeps the scroll region within the screen region.'''
|
||||
|
||||
if self.scroll_row_start <= 0:
|
||||
self.scroll_row_start = 1
|
||||
if self.scroll_row_end > self.rows:
|
||||
self.scroll_row_end = self.rows
|
||||
|
||||
def scroll_screen (self): # <ESC>[r
|
||||
'''Enable scrolling for entire display.'''
|
||||
|
||||
self.scroll_row_start = 1
|
||||
self.scroll_row_end = self.rows
|
||||
|
||||
def scroll_screen_rows (self, rs, re): # <ESC>[{start};{end}r
|
||||
'''Enable scrolling from row {start} to row {end}.'''
|
||||
|
||||
self.scroll_row_start = rs
|
||||
self.scroll_row_end = re
|
||||
self.scroll_constrain()
|
||||
|
||||
def scroll_down (self): # <ESC>D
|
||||
'''Scroll display down one line.'''
|
||||
|
||||
# Screen is indexed from 1, but arrays are indexed from 0.
|
||||
s = self.scroll_row_start - 1
|
||||
e = self.scroll_row_end - 1
|
||||
self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
|
||||
|
||||
def scroll_up (self): # <ESC>M
|
||||
'''Scroll display up one line.'''
|
||||
|
||||
# Screen is indexed from 1, but arrays are indexed from 0.
|
||||
s = self.scroll_row_start - 1
|
||||
e = self.scroll_row_end - 1
|
||||
self.w[s:e] = copy.deepcopy(self.w[s+1:e+1])
|
||||
|
||||
def erase_end_of_line (self): # <ESC>[0K -or- <ESC>[K
|
||||
'''Erases from the current cursor position to the end of the current
|
||||
line.'''
|
||||
|
||||
self.fill_region (self.cur_r, self.cur_c, self.cur_r, self.cols)
|
||||
|
||||
def erase_start_of_line (self): # <ESC>[1K
|
||||
'''Erases from the current cursor position to the start of the current
|
||||
line.'''
|
||||
|
||||
self.fill_region (self.cur_r, 1, self.cur_r, self.cur_c)
|
||||
|
||||
def erase_line (self): # <ESC>[2K
|
||||
'''Erases the entire current line.'''
|
||||
|
||||
self.fill_region (self.cur_r, 1, self.cur_r, self.cols)
|
||||
|
||||
def erase_down (self): # <ESC>[0J -or- <ESC>[J
|
||||
'''Erases the screen from the current line down to the bottom of the
|
||||
screen.'''
|
||||
|
||||
self.erase_end_of_line ()
|
||||
self.fill_region (self.cur_r + 1, 1, self.rows, self.cols)
|
||||
|
||||
def erase_up (self): # <ESC>[1J
|
||||
'''Erases the screen from the current line up to the top of the
|
||||
screen.'''
|
||||
|
||||
self.erase_start_of_line ()
|
||||
self.fill_region (self.cur_r-1, 1, 1, self.cols)
|
||||
|
||||
def erase_screen (self): # <ESC>[2J
|
||||
'''Erases the screen with the background color.'''
|
||||
|
||||
self.fill ()
|
||||
|
||||
def set_tab (self): # <ESC>H
|
||||
'''Sets a tab at the current position.'''
|
||||
|
||||
pass
|
||||
|
||||
def clear_tab (self): # <ESC>[g
|
||||
'''Clears tab at the current position.'''
|
||||
|
||||
pass
|
||||
|
||||
def clear_all_tabs (self): # <ESC>[3g
|
||||
'''Clears all tabs.'''
|
||||
|
||||
pass
|
||||
|
||||
# Insert line Esc [ Pn L
|
||||
# Delete line Esc [ Pn M
|
||||
# Delete character Esc [ Pn P
|
||||
# Scrolling region Esc [ Pn(top);Pn(bot) r
|
||||
|
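A small sketch of the screen class above (note the deprecation warning pointing at pyte); rows and columns are 1-based, and the text written here is arbitrary.

    from pexpect import screen

    s = screen.screen(r=5, c=20)
    for col, ch in enumerate('hello', start=1):
        s.put_abs(1, col, ch)      # one character per column on row 1
    print(s.pretty())              # buffer rendered inside an ASCII border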
525
plugins/git_clone/pexpect/spawnbase.py
Normal file
@ -0,0 +1,525 @@
|
||||
from io import StringIO, BytesIO
|
||||
import codecs
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import errno
|
||||
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
|
||||
from .expect import Expecter, searcher_string, searcher_re
|
||||
|
||||
PY3 = (sys.version_info[0] >= 3)
|
||||
text_type = str if PY3 else unicode
|
||||
|
||||
class _NullCoder(object):
|
||||
"""Pass bytes through unchanged."""
|
||||
@staticmethod
|
||||
def encode(b, final=False):
|
||||
return b
|
||||
|
||||
@staticmethod
|
||||
def decode(b, final=False):
|
||||
return b
|
||||
|
||||
class SpawnBase(object):
|
||||
"""A base class providing the backwards-compatible spawn API for Pexpect.
|
||||
|
||||
This should not be instantiated directly: use :class:`pexpect.spawn` or
|
||||
:class:`pexpect.fdpexpect.fdspawn`.
|
||||
"""
|
||||
encoding = None
|
||||
pid = None
|
||||
flag_eof = False
|
||||
|
||||
def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
|
||||
logfile=None, encoding=None, codec_errors='strict'):
|
||||
self.stdin = sys.stdin
|
||||
self.stdout = sys.stdout
|
||||
self.stderr = sys.stderr
|
||||
|
||||
self.searcher = None
|
||||
self.ignorecase = False
|
||||
self.before = None
|
||||
self.after = None
|
||||
self.match = None
|
||||
self.match_index = None
|
||||
self.terminated = True
|
||||
self.exitstatus = None
|
||||
self.signalstatus = None
|
||||
# status returned by os.waitpid
|
||||
self.status = None
|
||||
# the child file descriptor is initially closed
|
||||
self.child_fd = -1
|
||||
self.timeout = timeout
|
||||
self.delimiter = EOF
|
||||
self.logfile = logfile
|
||||
# input from child (read_nonblocking)
|
||||
self.logfile_read = None
|
||||
# output to send (send, sendline)
|
||||
self.logfile_send = None
|
||||
# max bytes to read at one time into buffer
|
||||
self.maxread = maxread
|
||||
# Data before searchwindowsize point is preserved, but not searched.
|
||||
self.searchwindowsize = searchwindowsize
|
||||
# Delay used before sending data to child. Time in seconds.
|
||||
# Set this to None to skip the time.sleep() call completely.
|
||||
self.delaybeforesend = 0.05
|
||||
# Used by close() to give kernel time to update process status.
|
||||
# Time in seconds.
|
||||
self.delayafterclose = 0.1
|
||||
# Used by terminate() to give kernel time to update process status.
|
||||
# Time in seconds.
|
||||
self.delayafterterminate = 0.1
|
||||
# Delay in seconds to sleep after each call to read_nonblocking().
|
||||
# Set this to None to skip the time.sleep() call completely: that
|
||||
# would restore the behavior from pexpect-2.0 (for performance
|
||||
# reasons or because you don't want to release Python's global
|
||||
# interpreter lock).
|
||||
self.delayafterread = 0.0001
|
||||
self.softspace = False
|
||||
self.name = '<' + repr(self) + '>'
|
||||
self.closed = True
|
||||
|
||||
# Unicode interface
|
||||
self.encoding = encoding
|
||||
self.codec_errors = codec_errors
|
||||
if encoding is None:
|
||||
# bytes mode (accepts some unicode for backwards compatibility)
|
||||
self._encoder = self._decoder = _NullCoder()
|
||||
self.string_type = bytes
|
||||
self.buffer_type = BytesIO
|
||||
self.crlf = b'\r\n'
|
||||
if PY3:
|
||||
self.allowed_string_types = (bytes, str)
|
||||
self.linesep = os.linesep.encode('ascii')
|
||||
def write_to_stdout(b):
|
||||
try:
|
||||
return sys.stdout.buffer.write(b)
|
||||
except AttributeError:
|
||||
# If stdout has been replaced, it may not have .buffer
|
||||
return sys.stdout.write(b.decode('ascii', 'replace'))
|
||||
self.write_to_stdout = write_to_stdout
|
||||
else:
|
||||
self.allowed_string_types = (basestring,) # analysis:ignore
|
||||
self.linesep = os.linesep
|
||||
self.write_to_stdout = sys.stdout.write
|
||||
else:
|
||||
# unicode mode
|
||||
self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
|
||||
self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
|
||||
self.string_type = text_type
|
||||
self.buffer_type = StringIO
|
||||
self.crlf = u'\r\n'
|
||||
self.allowed_string_types = (text_type, )
|
||||
if PY3:
|
||||
self.linesep = os.linesep
|
||||
else:
|
||||
self.linesep = os.linesep.decode('ascii')
|
||||
# This can handle unicode in both Python 2 and 3
|
||||
self.write_to_stdout = sys.stdout.write
|
||||
# storage for async transport
|
||||
self.async_pw_transport = None
|
||||
# This is the read buffer. See maxread.
|
||||
self._buffer = self.buffer_type()
|
||||
# The buffer may be trimmed for efficiency reasons. This is the
|
||||
# untrimmed buffer, used to create the before attribute.
|
||||
self._before = self.buffer_type()
|
||||
|
||||
def _log(self, s, direction):
|
||||
if self.logfile is not None:
|
||||
self.logfile.write(s)
|
||||
self.logfile.flush()
|
||||
second_log = self.logfile_send if (direction=='send') else self.logfile_read
|
||||
if second_log is not None:
|
||||
second_log.write(s)
|
||||
second_log.flush()
|
||||
|
||||
# For backwards compatibility, in bytes mode (when encoding is None)
|
||||
# unicode is accepted for send and expect. Unicode mode is strictly unicode
|
||||
# only.
|
||||
def _coerce_expect_string(self, s):
|
||||
if self.encoding is None and not isinstance(s, bytes):
|
||||
return s.encode('ascii')
|
||||
return s
|
||||
|
||||
def _coerce_send_string(self, s):
|
||||
if self.encoding is None and not isinstance(s, bytes):
|
||||
return s.encode('utf-8')
|
||||
return s
|
||||
|
||||
def _get_buffer(self):
|
||||
return self._buffer.getvalue()
|
||||
|
||||
def _set_buffer(self, value):
|
||||
self._buffer = self.buffer_type()
|
||||
self._buffer.write(value)
|
||||
|
||||
# This property is provided for backwards compatibility (self.buffer used
|
||||
# to be a string/bytes object)
|
||||
buffer = property(_get_buffer, _set_buffer)
|
||||
|
||||
def read_nonblocking(self, size=1, timeout=None):
|
||||
"""This reads data from the file descriptor.
|
||||
|
||||
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
|
||||
|
||||
The timeout parameter is ignored.
|
||||
"""
|
||||
|
||||
try:
|
||||
s = os.read(self.child_fd, size)
|
||||
except OSError as err:
|
||||
if err.args[0] == errno.EIO:
|
||||
# Linux-style EOF
|
||||
self.flag_eof = True
|
||||
raise EOF('End Of File (EOF). Exception style platform.')
|
||||
raise
|
||||
if s == b'':
|
||||
# BSD-style EOF
|
||||
self.flag_eof = True
|
||||
raise EOF('End Of File (EOF). Empty string style platform.')
|
||||
|
||||
s = self._decoder.decode(s, final=False)
|
||||
self._log(s, 'read')
|
||||
return s
|
||||
|
||||
def _pattern_type_err(self, pattern):
|
||||
raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
|
||||
' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
|
||||
.format(badtype=type(pattern),
|
||||
badobj=pattern,
|
||||
goodtypes=', '.join([str(ast)\
|
||||
for ast in self.allowed_string_types])
|
||||
)
|
||||
)
|
||||
|
||||
def compile_pattern_list(self, patterns):
|
||||
'''This compiles a pattern-string or a list of pattern-strings.
|
||||
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
|
||||
those. Patterns may also be None which results in an empty list (you
|
||||
might do this if waiting for an EOF or TIMEOUT condition without
|
||||
expecting any pattern).
|
||||
|
||||
This is used by expect() when calling expect_list(). Thus expect() is
|
||||
nothing more than::
|
||||
|
||||
cpl = self.compile_pattern_list(pl)
|
||||
return self.expect_list(cpl, timeout)
|
||||
|
||||
If you are using expect() within a loop it may be more
|
||||
efficient to compile the patterns first and then call expect_list().
|
||||
This avoid calls in a loop to compile_pattern_list()::
|
||||
|
||||
cpl = self.compile_pattern_list(my_pattern)
|
||||
while some_condition:
|
||||
...
|
||||
i = self.expect_list(cpl, timeout)
|
||||
...
|
||||
'''
|
||||
|
||||
if patterns is None:
|
||||
return []
|
||||
if not isinstance(patterns, list):
|
||||
patterns = [patterns]
|
||||
|
||||
# Allow dot to match \n
|
||||
compile_flags = re.DOTALL
|
||||
if self.ignorecase:
|
||||
compile_flags = compile_flags | re.IGNORECASE
|
||||
compiled_pattern_list = []
|
||||
for idx, p in enumerate(patterns):
|
||||
if isinstance(p, self.allowed_string_types):
|
||||
p = self._coerce_expect_string(p)
|
||||
compiled_pattern_list.append(re.compile(p, compile_flags))
|
||||
elif p is EOF:
|
||||
compiled_pattern_list.append(EOF)
|
||||
elif p is TIMEOUT:
|
||||
compiled_pattern_list.append(TIMEOUT)
|
||||
elif isinstance(p, type(re.compile(''))):
|
||||
compiled_pattern_list.append(p)
|
||||
else:
|
||||
self._pattern_type_err(p)
|
||||
return compiled_pattern_list
|
||||
|
||||
def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw):
|
||||
'''This seeks through the stream until a pattern is matched. The
|
||||
pattern is overloaded and may take several types. The pattern can be a
|
||||
StringType, EOF, a compiled re, or a list of any of those types.
|
||||
Strings will be compiled to re types. This returns the index into the
|
||||
pattern list. If the pattern was not a list this returns index 0 on a
|
||||
successful match. This may raise exceptions for EOF or TIMEOUT. To
|
||||
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
|
||||
list. That will cause expect to match an EOF or TIMEOUT condition
|
||||
instead of raising an exception.
|
||||
|
||||
If you pass a list of patterns and more than one matches, the first
|
||||
match in the stream is chosen. If more than one pattern matches at that
|
||||
point, the leftmost in the pattern list is chosen. For example::
|
||||
|
||||
# the input is 'foobar'
|
||||
index = p.expect(['bar', 'foo', 'foobar'])
|
||||
# returns 1('foo') even though 'foobar' is a "better" match
|
||||
|
||||
Please note, however, that buffering can affect this behavior, since
|
||||
input arrives in unpredictable chunks. For example::
|
||||
|
||||
# the input is 'foobar'
|
||||
index = p.expect(['foobar', 'foo'])
|
||||
# returns 0('foobar') if all input is available at once,
|
||||
# but returns 1('foo') if parts of the final 'bar' arrive late
|
||||
|
||||
When a match is found for the given pattern, the class instance
|
||||
attribute *match* becomes an re.MatchObject result. Should an EOF
|
||||
or TIMEOUT pattern match, then the match attribute will be an instance
|
||||
of that exception class. The pairing before and after class
|
||||
instance attributes are views of the data preceding and following
|
||||
the matching pattern. On general exception, class attribute
|
||||
*before* is all data received up to the exception, while *match* and
|
||||
*after* attributes are None.
|
||||
|
||||
When the keyword argument timeout is -1 (default), then TIMEOUT will
|
||||
raise after the default value specified by the class timeout
|
||||
attribute. When None, TIMEOUT will not be raised and expect() may block
|
||||
indefinitely until a match is found.
|
||||
|
||||
When the keyword argument searchwindowsize is -1 (default), then the
|
||||
value specified by the class maxread attribute is used.
|
||||
|
||||
A list entry may be EOF or TIMEOUT instead of a string. This will
|
||||
catch these exceptions and return the index of the list entry instead
|
||||
of raising the exception. The attribute 'after' will be set to the
|
||||
exception type. The attribute 'match' will be None. This allows you to
|
||||
write code like this::
|
||||
|
||||
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
|
||||
if index == 0:
|
||||
do_something()
|
||||
elif index == 1:
|
||||
do_something_else()
|
||||
elif index == 2:
|
||||
do_some_other_thing()
|
||||
elif index == 3:
|
||||
do_something_completely_different()
|
||||
|
||||
instead of code like this::
|
||||
|
||||
try:
|
||||
index = p.expect(['good', 'bad'])
|
||||
if index == 0:
|
||||
do_something()
|
||||
elif index == 1:
|
||||
do_something_else()
|
||||
except EOF:
|
||||
do_some_other_thing()
|
||||
except TIMEOUT:
|
||||
do_something_completely_different()
|
||||
|
||||
These two forms are equivalent. It all depends on what you want. You
|
||||
can also just expect the EOF if you are waiting for all output of a
|
||||
child to finish. For example::
|
||||
|
||||
p = pexpect.spawn('/bin/ls')
|
||||
p.expect(pexpect.EOF)
|
||||
print(p.before)
|
||||
|
||||
If you are trying to optimize for speed then see expect_list().
|
||||
|
||||
On Python 3.4, or Python 3.3 with asyncio installed, passing
|
||||
``async_=True`` will make this return an :mod:`asyncio` coroutine,
|
||||
which you can yield from to get the same result that this method would
|
||||
normally give directly. So, inside a coroutine, you can replace this code::
|
||||
|
||||
index = p.expect(patterns)
|
||||
|
||||
With this non-blocking form::
|
||||
|
||||
index = yield from p.expect(patterns, async_=True)
|
||||
'''
|
||||
if 'async' in kw:
|
||||
async_ = kw.pop('async')
|
||||
if kw:
|
||||
raise TypeError("Unknown keyword arguments: {}".format(kw))
|
||||
|
||||
compiled_pattern_list = self.compile_pattern_list(pattern)
|
||||
return self.expect_list(compiled_pattern_list,
|
||||
timeout, searchwindowsize, async_)
|
||||
|
||||
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
|
||||
async_=False, **kw):
|
||||
'''This takes a list of compiled regular expressions and returns the
|
||||
index into the pattern_list that matched the child output. The list may
|
||||
also contain EOF or TIMEOUT(which are not compiled regular
|
||||
expressions). This method is similar to the expect() method except that
|
||||
expect_list() does not recompile the pattern list on every call. This
|
||||
may help if you are trying to optimize for speed, otherwise just use
|
||||
the expect() method. This is called by expect().
|
||||
|
||||
|
||||
Like :meth:`expect`, passing ``async_=True`` will make this return an
|
||||
asyncio coroutine.
|
||||
'''
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
if 'async' in kw:
|
||||
async_ = kw.pop('async')
|
||||
if kw:
|
||||
raise TypeError("Unknown keyword arguments: {}".format(kw))
|
||||
|
||||
exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
|
||||
if async_:
|
||||
from ._async import expect_async
|
||||
return expect_async(exp, timeout)
|
||||
else:
|
||||
return exp.expect_loop(timeout)
|
||||
|
||||
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
|
||||
async_=False, **kw):
|
||||
|
||||
'''This is similar to expect(), but uses plain string matching instead
|
||||
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
|
||||
may be a string; a list or other sequence of strings; or TIMEOUT and
|
||||
EOF.
|
||||
|
||||
This call might be faster than expect() for two reasons: string
|
||||
searching is faster than RE matching and it is possible to limit the
|
||||
search to just the end of the input buffer.
|
||||
|
||||
This method is also useful when you don't want to have to worry about
|
||||
escaping regular expression characters that you want to match.
|
||||
|
||||
Like :meth:`expect`, passing ``async_=True`` will make this return an
|
||||
asyncio coroutine.
|
||||
'''
|
||||
if timeout == -1:
|
||||
timeout = self.timeout
|
||||
if 'async' in kw:
|
||||
async_ = kw.pop('async')
|
||||
if kw:
|
||||
raise TypeError("Unknown keyword arguments: {}".format(kw))
|
||||
|
||||
if (isinstance(pattern_list, self.allowed_string_types) or
|
||||
pattern_list in (TIMEOUT, EOF)):
|
||||
pattern_list = [pattern_list]
|
||||
|
||||
def prepare_pattern(pattern):
|
||||
if pattern in (TIMEOUT, EOF):
|
||||
return pattern
|
||||
if isinstance(pattern, self.allowed_string_types):
|
||||
return self._coerce_expect_string(pattern)
|
||||
self._pattern_type_err(pattern)
|
||||
|
||||
try:
|
||||
pattern_list = iter(pattern_list)
|
||||
except TypeError:
|
||||
self._pattern_type_err(pattern_list)
|
||||
pattern_list = [prepare_pattern(p) for p in pattern_list]
|
||||
|
||||
exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
|
||||
if async_:
|
||||
from ._async import expect_async
|
||||
return expect_async(exp, timeout)
|
||||
else:
|
||||
return exp.expect_loop(timeout)
|
||||
|
||||
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
|
||||
'''This is the common loop used inside expect. The 'searcher' should be
|
||||
an instance of searcher_re or searcher_string, which describes how and
|
||||
what to search for in the input.
|
||||
|
||||
See expect() for other arguments, return value and exceptions. '''
|
||||
|
||||
exp = Expecter(self, searcher, searchwindowsize)
|
||||
return exp.expect_loop(timeout)
|
||||
|
||||
def read(self, size=-1):
|
||||
'''This reads at most "size" bytes from the file (less if the read hits
|
||||
EOF before obtaining size bytes). If the size argument is negative or
|
||||
omitted, read all data until EOF is reached. The bytes are returned as
|
||||
a string object. An empty string is returned when EOF is encountered
|
||||
immediately. '''
|
||||
|
||||
if size == 0:
|
||||
return self.string_type()
|
||||
if size < 0:
|
||||
# delimiter default is EOF
|
||||
self.expect(self.delimiter)
|
||||
return self.before
|
||||
|
||||
# I could have done this more directly by not using expect(), but
|
||||
# I deliberately decided to couple read() to expect() so that
|
||||
# I would catch any bugs early and ensure consistent behavior.
|
||||
# It's a little less efficient, but there is less for me to
|
||||
# worry about if I have to later modify read() or expect().
|
||||
# Note, it's OK if size==-1 in the regex. That just means it
|
||||
# will never match anything in which case we stop only on EOF.
|
||||
cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
|
||||
# delimiter default is EOF
|
||||
index = self.expect([cre, self.delimiter])
|
||||
if index == 0:
|
||||
### FIXME self.before should be ''. Should I assert this?
|
||||
return self.after
|
||||
return self.before
|
||||
|
||||
def readline(self, size=-1):
|
||||
'''This reads and returns one entire line. The newline at the end of
|
||||
line is returned as part of the string, unless the file ends without a
|
||||
newline. An empty string is returned if EOF is encountered immediately.
|
||||
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
|
||||
this is what the pseudotty device returns. So contrary to what you may
|
||||
expect you will receive newlines as \\r\\n.
|
||||
|
||||
If the size argument is 0 then an empty string is returned. In all
|
||||
other cases the size argument is ignored, which is not standard
|
||||
behavior for a file-like object. '''
|
||||
|
||||
if size == 0:
|
||||
return self.string_type()
|
||||
# delimiter default is EOF
|
||||
index = self.expect([self.crlf, self.delimiter])
|
||||
if index == 0:
|
||||
return self.before + self.crlf
|
||||
else:
|
||||
return self.before
|
||||
|
||||
def __iter__(self):
|
||||
'''This is to support iterators over a file-like object.
|
||||
'''
|
||||
return iter(self.readline, self.string_type())
|
||||
|
||||
def readlines(self, sizehint=-1):
|
||||
'''This reads until EOF using readline() and returns a list containing
|
||||
the lines thus read. The optional 'sizehint' argument is ignored.
|
||||
Remember, because this reads until EOF that means the child
|
||||
process should have closed its stdout. If you run this method on
|
||||
a child that is still running with its stdout open then this
|
||||
method will block until it times out.'''
|
||||
|
||||
lines = []
|
||||
while True:
|
||||
line = self.readline()
|
||||
if not line:
|
||||
break
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
def fileno(self):
|
||||
'''Expose file descriptor for a file-like interface
|
||||
'''
|
||||
return self.child_fd
|
||||
|
||||
def flush(self):
|
||||
'''This does nothing. It is here to support the interface for a
|
||||
File-like object. '''
|
||||
pass
|
||||
|
||||
def isatty(self):
|
||||
"""Overridden in subclass using tty"""
|
||||
return False
|
||||
|
||||
# For 'with spawn(...) as child:'
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, etype, evalue, tb):
|
||||
# We rely on subclasses to implement close(). If they don't, it's not
|
||||
# clear what a context manager should do.
|
||||
self.close()
|
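A brief sketch of the expect() interface defined above, driven through pexpect.spawn (the concrete subclass); the ftp host is purely illustrative.

    import pexpect

    child = pexpect.spawn('ftp ftp.example.com', encoding='utf-8')
    index = child.expect(['(?i)name', '(?i)unknown host', pexpect.EOF, pexpect.TIMEOUT])
    if index == 0:
        child.sendline('anonymous')
    else:
        child.close()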
187
plugins/git_clone/pexpect/utils.py
Normal file
@ -0,0 +1,187 @@
|
||||
import os
|
||||
import sys
|
||||
import stat
|
||||
import select
|
||||
import time
|
||||
import errno
|
||||
|
||||
try:
|
||||
InterruptedError
|
||||
except NameError:
|
||||
# Alias Python2 exception to Python3
|
||||
InterruptedError = select.error
|
||||
|
||||
if sys.version_info[0] >= 3:
|
||||
string_types = (str,)
|
||||
else:
|
||||
string_types = (unicode, str)
|
||||
|
||||
|
||||
def is_executable_file(path):
|
||||
"""Checks that path is an executable regular file, or a symlink towards one.
|
||||
|
||||
This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
|
||||
"""
|
||||
# follow symlinks,
|
||||
fpath = os.path.realpath(path)
|
||||
|
||||
if not os.path.isfile(fpath):
|
||||
# non-files (directories, fifo, etc.)
|
||||
return False
|
||||
|
||||
mode = os.stat(fpath).st_mode
|
||||
|
||||
if (sys.platform.startswith('sunos')
|
||||
and os.getuid() == 0):
|
||||
# When root on Solaris, os.X_OK is True for *all* files, regardless
|
||||
# of their executability -- instead, any permission bit of any user,
|
||||
# group, or other is fine enough.
|
||||
#
|
||||
# (This may be true for other "Unix98" OS's such as HP-UX and AIX)
|
||||
return bool(mode & (stat.S_IXUSR |
|
||||
stat.S_IXGRP |
|
||||
stat.S_IXOTH))
|
||||
|
||||
return os.access(fpath, os.X_OK)
|
||||
|
||||
|
||||
def which(filename, env=None):
|
||||
'''This takes a given filename; tries to find it in the environment path;
|
||||
then checks if it is executable. This returns the full path to the filename
|
||||
if found and executable. Otherwise this returns None.'''
|
||||
|
||||
# Special case where filename contains an explicit path.
|
||||
if os.path.dirname(filename) != '' and is_executable_file(filename):
|
||||
return filename
|
||||
if env is None:
|
||||
env = os.environ
|
||||
p = env.get('PATH')
|
||||
if not p:
|
||||
p = os.defpath
|
||||
pathlist = p.split(os.pathsep)
|
||||
for path in pathlist:
|
||||
ff = os.path.join(path, filename)
|
||||
if is_executable_file(ff):
|
||||
return ff
|
||||
return None
|
||||
|
||||
|
||||
def split_command_line(command_line):
|
||||
|
||||
'''This splits a command line into a list of arguments. It splits arguments
|
||||
on spaces, but handles embedded quotes, doublequotes, and escaped
|
||||
characters. It's impossible to do this with a regular expression, so I
|
||||
wrote a little state machine to parse the command line. '''
|
||||
|
||||
arg_list = []
|
||||
arg = ''
|
||||
|
||||
# Constants to name the states we can be in.
|
||||
state_basic = 0
|
||||
state_esc = 1
|
||||
state_singlequote = 2
|
||||
state_doublequote = 3
|
||||
# The state when consuming whitespace between commands.
|
||||
state_whitespace = 4
|
||||
state = state_basic
|
||||
|
||||
for c in command_line:
|
||||
if state == state_basic or state == state_whitespace:
|
||||
if c == '\\':
|
||||
# Escape the next character
|
||||
state = state_esc
|
||||
elif c == r"'":
|
||||
# Handle single quote
|
||||
state = state_singlequote
|
||||
elif c == r'"':
|
||||
# Handle double quote
|
||||
state = state_doublequote
|
||||
elif c.isspace():
|
||||
# Add arg to arg_list if we aren't in the middle of whitespace.
|
||||
if state == state_whitespace:
|
||||
# Do nothing.
|
||||
pass
|
||||
else:
|
||||
arg_list.append(arg)
|
||||
arg = ''
|
||||
state = state_whitespace
|
||||
else:
|
||||
arg = arg + c
|
||||
state = state_basic
|
||||
elif state == state_esc:
|
||||
arg = arg + c
|
||||
state = state_basic
|
||||
elif state == state_singlequote:
|
||||
if c == r"'":
|
||||
state = state_basic
|
||||
else:
|
||||
arg = arg + c
|
||||
elif state == state_doublequote:
|
||||
if c == r'"':
|
||||
state = state_basic
|
||||
else:
|
||||
arg = arg + c
|
||||
|
||||
if arg != '':
|
||||
arg_list.append(arg)
|
||||
return arg_list
|
||||
|
||||
|
||||
def select_ignore_interrupts(iwtd, owtd, ewtd, timeout=None):
|
||||
|
||||
'''This is a wrapper around select.select() that ignores signals. If
|
||||
select.select raises a select.error exception and errno is an EINTR
|
||||
error then it is ignored. Mainly this is used to ignore sigwinch
|
||||
(terminal resize). '''
|
||||
|
||||
# if select() is interrupted by a signal (errno==EINTR) then
|
||||
# we loop back and enter the select() again.
|
||||
if timeout is not None:
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
try:
|
||||
return select.select(iwtd, owtd, ewtd, timeout)
|
||||
except InterruptedError:
|
||||
err = sys.exc_info()[1]
|
||||
if err.args[0] == errno.EINTR:
|
||||
# if we loop back we have to subtract the
|
||||
# amount of time we already waited.
|
||||
if timeout is not None:
|
||||
timeout = end_time - time.time()
|
||||
if timeout < 0:
|
||||
return([], [], [])
|
||||
else:
|
||||
# something else caused the select.error, so
|
||||
# this actually is an exception.
|
||||
raise
|
||||
|
||||
|
||||
def poll_ignore_interrupts(fds, timeout=None):
|
||||
'''Simple wrapper around poll to register file descriptors and
|
||||
ignore signals.'''
|
||||
|
||||
if timeout is not None:
|
||||
end_time = time.time() + timeout
|
||||
|
||||
poller = select.poll()
|
||||
for fd in fds:
|
||||
poller.register(fd, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR)
|
||||
|
||||
while True:
|
||||
try:
|
||||
timeout_ms = None if timeout is None else timeout * 1000
|
||||
results = poller.poll(timeout_ms)
|
||||
return [afd for afd, _ in results]
|
||||
except InterruptedError:
|
||||
err = sys.exc_info()[1]
|
||||
if err.args[0] == errno.EINTR:
|
||||
# if we loop back we have to subtract the
|
||||
# amount of time we already waited.
|
||||
if timeout is not None:
|
||||
timeout = end_time - time.time()
|
||||
if timeout < 0:
|
||||
return []
|
||||
else:
|
||||
# something else caused the select.error, so
|
||||
# this actually is an exception.
|
||||
raise
|
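A quick sketch of the helpers above, assuming this vendored package is importable as pexpect; the command line being split is only an example.

    from pexpect.utils import which, split_command_line

    print(which('ls'))   # e.g. '/bin/ls', or None if nothing executable is found
    print(split_command_line('git commit -m "first cut"'))
    # -> ['git', 'commit', '-m', 'first cut']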
@ -5,6 +5,7 @@ import subprocess
|
||||
import time
|
||||
|
||||
# Lib imports
|
||||
from . import pexpect
|
||||
import gi
|
||||
gi.require_version('Gtk', '3.0')
|
||||
from gi.repository import Gtk
|
||||
@ -21,6 +22,10 @@ def threaded(fn):
|
||||
|
||||
|
||||
|
||||
class GitClonePluginException(Exception):
|
||||
...
|
||||
|
||||
|
||||
|
||||
class Plugin(PluginBase):
|
||||
def __init__(self):
|
||||
@ -42,9 +47,38 @@ class Plugin(PluginBase):
|
||||
def _do_download(self, widget=None, eve=None):
|
||||
self._event_system.emit("get_current_state")
|
||||
|
||||
self.get_user_and_pass()
|
||||
dir = self._fm_state.tab.get_current_directory()
|
||||
self._download(dir)
|
||||
events = {
|
||||
'(?i)Username': self.get_user(),
|
||||
'(?i)Password': self.get_pass()
|
||||
}
|
||||
|
||||
self._download(dir, events)
|
||||
|
||||
@threaded
|
||||
def _download(self, dir):
|
||||
subprocess.Popen([f'{self.path}/download.sh', dir])
|
||||
def _download(self, dir, _events):
|
||||
git_clone_link = self.get_clipboard_data()
|
||||
pexpect.run(f"git clone {git_clone_link}", cwd = dir, events=_events)
|
||||
|
||||
|
||||
def get_user_and_pass(self):
|
||||
response = self._fm_state.user_pass_dialog.run()
|
||||
if response in (-4, -6):
|
||||
raise GitClonePluginException("User canceled request...")
|
||||
|
||||
|
||||
def get_user(self):
|
||||
user = self._fm_state.user_pass_dialog.user_input.get_text()
|
||||
return f"{user}\n"
|
||||
|
||||
def get_pass(self):
|
||||
passwd = self._fm_state.user_pass_dialog.pass_input.get_text()
|
||||
return f"{passwd}\n"
|
||||
|
||||
|
||||
def get_clipboard_data(self, encoding="utf-8") -> str:
|
||||
proc = subprocess.Popen(['xclip','-selection', 'clipboard', '-o'], stdout=subprocess.PIPE)
|
||||
retcode = proc.wait()
|
||||
data = proc.stdout.read()
|
||||
return data.decode(encoding).strip()
|
||||
|
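A sketch of the prompt-answering pattern the plugin uses above: the events dict maps git's credential prompts to responses (note the trailing newlines). The URL, credentials, and target directory are placeholders.

    import pexpect

    events = {
        '(?i)Username': 'alice\n',
        '(?i)Password': 's3cret\n',
    }
    pexpect.run('git clone https://example.com/alice/repo.git',
                cwd='/tmp', events=events, timeout=120)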
6
plugins/translate/brotli/__init__.py
Normal file
@ -0,0 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# flake8: noqa
|
||||
from .brotli import (
|
||||
decompress, Decompressor, compress, BrotliEncoderMode, DEFAULT_MODE,
|
||||
Compressor, MODE_GENERIC, MODE_TEXT, MODE_FONT, error, Error
|
||||
)
|
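A round-trip sketch of the API re-exported above, assuming the package is importable as brotli; the payload is arbitrary.

    import brotli

    payload = b'hello hello hello hello'
    blob = brotli.compress(payload, mode=brotli.MODE_TEXT, quality=5)
    assert brotli.decompress(blob) == payload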
466
plugins/translate/brotli/brotli.py
Normal file
@ -0,0 +1,466 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import math
|
||||
import enum
|
||||
|
||||
from ._brotli import ffi, lib
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
"""
|
||||
Raised whenever an error is encountered with compressing or decompressing
|
||||
data using brotlipy.
|
||||
|
||||
.. versionadded:: 0.5.1
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
#: An alias of :class:`Error <brotli.Error>` that exists for compatibility with
|
||||
#: the original C brotli module.
|
||||
#:
|
||||
#: .. versionadded: 0.5.1
|
||||
error = Error
|
||||
|
||||
|
||||
class BrotliEncoderMode(enum.IntEnum):
|
||||
"""
|
||||
Compression modes for the Brotli encoder.
|
||||
|
||||
.. versionadded:: 0.5.0
|
||||
"""
|
||||
#: Default compression mode. The compressor does not know anything in
|
||||
#: advance about the properties of the input.
|
||||
GENERIC = lib.BROTLI_MODE_GENERIC
|
||||
|
||||
#: Compression mode for UTF-8 format text input.
|
||||
TEXT = lib.BROTLI_MODE_TEXT
|
||||
|
||||
#: Compression mode used in WOFF 2.0
|
||||
FONT = lib.BROTLI_MODE_FONT
|
||||
|
||||
|
||||
# Define some names for compatibility with the C module.
|
||||
|
||||
#: The default compression mode for brotli.
|
||||
DEFAULT_MODE = BrotliEncoderMode(lib.BROTLI_DEFAULT_MODE)
|
||||
|
||||
|
||||
#: A compression mode where the compressor does not know anything in advance
|
||||
#: about the properties of the input.
|
||||
#:
|
||||
#: .. note:: This name is defined for compatibility with the Brotli C
|
||||
#: extension. If you're not interested in that compatibility, it is
|
||||
#: recommended that you use :class:`BrotliEncoderMode
|
||||
#: <brotli.BrotliEncoderMode>` instead.
|
||||
#:
|
||||
#: .. versionadded:: 0.5.0
|
||||
MODE_GENERIC = BrotliEncoderMode.GENERIC
|
||||
|
||||
|
||||
#: A compression mode for UTF-8 format text input.
|
||||
#:
|
||||
#: .. note:: This name is defined for compatibility with the Brotli C
|
||||
#: extension. If you're not interested in that compatibility, it is
|
||||
#: recommended that you use :class:`BrotliEncoderMode
|
||||
#: <brotli.BrotliEncoderMode>` instead.
|
||||
#:
|
||||
#: .. versionadded:: 0.5.0
|
||||
MODE_TEXT = BrotliEncoderMode.TEXT
|
||||
|
||||
|
||||
#: The compression mode used in WOFF 2.0.
|
||||
#:
|
||||
#: .. note:: This name is defined for compatibility with the Brotli C
|
||||
#: extension. If you're not interested in that compatibility, it is
|
||||
#: recommended that you use :class:`BrotliEncoderMode
|
||||
#: <brotli.BrotliEncoderMode>` instead.
|
||||
#:
|
||||
#: .. versionadded:: 0.5.0
|
||||
MODE_FONT = BrotliEncoderMode.FONT
|
||||
|
||||
|
||||
def decompress(data):
|
||||
"""
|
||||
Decompress a complete Brotli-compressed string.
|
||||
|
||||
:param data: A bytestring containing Brotli-compressed data.
|
||||
"""
|
||||
d = Decompressor()
|
||||
data = d.decompress(data)
|
||||
d.finish()
|
||||
return data
|
||||
|
||||
|
||||
def compress(data,
|
||||
mode=DEFAULT_MODE,
|
||||
quality=lib.BROTLI_DEFAULT_QUALITY,
|
||||
lgwin=lib.BROTLI_DEFAULT_WINDOW,
|
||||
lgblock=0,
|
||||
dictionary=b''):
|
||||
"""
|
||||
Compress a string using Brotli.
|
||||
|
||||
.. versionchanged:: 0.5.0
|
||||
Added ``mode``, ``quality``, ``lgwin``, ``lgblock``, and ``dictionary``
|
||||
parameters.
|
||||
|
||||
:param data: A bytestring containing the data to compress.
|
||||
:type data: ``bytes``
|
||||
|
||||
:param mode: The encoder mode.
|
||||
:type mode: :class:`BrotliEncoderMode` or ``int``
|
||||
|
||||
:param quality: Controls the compression-speed vs compression-density
|
||||
tradeoffs. The higher the quality, the slower the compression. The
|
||||
range of this value is 0 to 11.
|
||||
:type quality: ``int``
|
||||
|
||||
:param lgwin: The base-2 logarithm of the sliding window size. The range of
|
||||
this value is 10 to 24.
|
||||
:type lgwin: ``int``
|
||||
|
||||
:param lgblock: The base-2 logarithm of the maximum input block size. The
|
||||
range of this value is 16 to 24. If set to 0, the value will be set
|
||||
based on ``quality``.
|
||||
:type lgblock: ``int``
|
||||
|
||||
:param dictionary: A pre-set dictionary for LZ77. Please use this with
|
||||
caution: if a dictionary is used for compression, the same dictionary
|
||||
**must** be used for decompression!
|
||||
:type dictionary: ``bytes``
|
||||
|
||||
:returns: The compressed bytestring.
|
||||
:rtype: ``bytes``
|
||||
"""
|
||||
# This method uses private variables on the Compressor object, and
|
||||
# generally does a whole lot of stuff that's not supported by the public
|
||||
# API. The goal here is to minimise the number of allocations and copies
|
||||
# we have to do. Users should prefer this method over the Compressor if
|
||||
# they know they have single-shot data.
|
||||
compressor = Compressor(
|
||||
mode=mode,
|
||||
quality=quality,
|
||||
lgwin=lgwin,
|
||||
lgblock=lgblock,
|
||||
dictionary=dictionary
|
||||
)
|
||||
compressed_data = compressor._compress(data, lib.BROTLI_OPERATION_FINISH)
|
||||
assert lib.BrotliEncoderIsFinished(compressor._encoder) == lib.BROTLI_TRUE
|
||||
assert (
|
||||
lib.BrotliEncoderHasMoreOutput(compressor._encoder) == lib.BROTLI_FALSE
|
||||
)
|
||||
return compressed_data
|
||||
|
||||
|
||||
def _validate_mode(val):
|
||||
"""
|
||||
Validate that the mode is valid.
|
||||
"""
|
||||
try:
|
||||
val = BrotliEncoderMode(val)
|
||||
except ValueError:
|
||||
raise Error("%s is not a valid encoder mode" % val)
|
||||
|
||||
|
||||
def _validate_quality(val):
|
||||
"""
|
||||
Validate that the quality setting is valid.
|
||||
"""
|
||||
if not (0 <= val <= 11):
|
||||
raise Error(
|
||||
"%d is not a valid quality, must be between 0 and 11" % val
|
||||
)
|
||||
|
||||
|
||||
def _validate_lgwin(val):
|
||||
"""
|
||||
Validate that the lgwin setting is valid.
|
||||
"""
|
||||
if not (10 <= val <= 24):
|
||||
raise Error("%d is not a valid lgwin, must be between 10 and 24" % val)
|
||||
|
||||
|
||||
def _validate_lgblock(val):
|
||||
"""
|
||||
Validate that the lgblock setting is valid.
|
||||
"""
|
||||
if (val != 0) and not (16 <= val <= 24):
|
||||
raise Error(
|
||||
"%d is not a valid lgblock, must be either 0 or between 16 and 24"
|
||||
% val
|
||||
)
|
||||
|
||||
|
||||
def _set_parameter(encoder, parameter, parameter_name, val):
|
||||
"""
|
||||
This helper function sets a specific Brotli encoder parameter, checking
|
||||
the return code and raising :class:`Error <brotli.Error>` if it is
|
||||
invalid.
|
||||
"""
|
||||
rc = lib.BrotliEncoderSetParameter(encoder, parameter, val)
|
||||
|
||||
if parameter == lib.BROTLI_PARAM_MODE:
|
||||
_validate_mode(val)
|
||||
elif parameter == lib.BROTLI_PARAM_QUALITY:
|
||||
_validate_quality(val)
|
||||
elif parameter == lib.BROTLI_PARAM_LGWIN:
|
||||
_validate_lgwin(val)
|
||||
elif parameter == lib.BROTLI_PARAM_LGBLOCK:
|
||||
_validate_lgblock(val)
|
||||
else: # pragma: no cover
|
||||
raise RuntimeError("Unexpected parameter!")
|
||||
|
||||
# This block is defensive: I see no way to hit it, but as long as the
|
||||
# function returns a value we can live in hope that the brotli folks will
|
||||
# enforce their own constraints.
|
||||
if rc != lib.BROTLI_TRUE: # pragma: no cover
|
||||
raise Error(
|
||||
"Error setting parameter %s: %d" % (parameter_name, val)
|
||||
)
|
||||
|
||||
|
||||
class Compressor(object):
    """
    An object that allows for streaming compression of data using the Brotli
    compression algorithm.

    .. versionadded:: 0.5.0

    :param mode: The encoder mode.
    :type mode: :class:`BrotliEncoderMode` or ``int``

    :param quality: Controls the compression-speed vs compression-density
        tradeoffs. The higher the quality, the slower the compression. The
        range of this value is 0 to 11.
    :type quality: ``int``

    :param lgwin: The base-2 logarithm of the sliding window size. The range of
        this value is 10 to 24.
    :type lgwin: ``int``

    :param lgblock: The base-2 logarithm of the maximum input block size. The
        range of this value is 16 to 24. If set to 0, the value will be set
        based on ``quality``.
    :type lgblock: ``int``

    :param dictionary: A pre-set dictionary for LZ77. Please use this with
        caution: if a dictionary is used for compression, the same dictionary
        **must** be used for decompression!
    :type dictionary: ``bytes``
    """
    _dictionary = None
    _dictionary_size = None

    def __init__(self,
                 mode=DEFAULT_MODE,
                 quality=lib.BROTLI_DEFAULT_QUALITY,
                 lgwin=lib.BROTLI_DEFAULT_WINDOW,
                 lgblock=0,
                 dictionary=b''):
        enc = lib.BrotliEncoderCreateInstance(
            ffi.NULL, ffi.NULL, ffi.NULL
        )
        if not enc:  # pragma: no cover
            raise RuntimeError("Unable to allocate Brotli encoder!")

        enc = ffi.gc(enc, lib.BrotliEncoderDestroyInstance)

        # Configure the encoder appropriately.
        _set_parameter(enc, lib.BROTLI_PARAM_MODE, "mode", mode)
        _set_parameter(enc, lib.BROTLI_PARAM_QUALITY, "quality", quality)
        _set_parameter(enc, lib.BROTLI_PARAM_LGWIN, "lgwin", lgwin)
        _set_parameter(enc, lib.BROTLI_PARAM_LGBLOCK, "lgblock", lgblock)

        if dictionary:
            self._dictionary = ffi.new("uint8_t []", dictionary)
            self._dictionary_size = len(dictionary)
            lib.BrotliEncoderSetCustomDictionary(
                enc, self._dictionary_size, self._dictionary
            )

        self._encoder = enc

    def _compress(self, data, operation):
        """
        This private method compresses some data in a given mode. This is used
        because almost all of the code uses the exact same setup. It wouldn't
        have to, but it doesn't hurt at all.
        """
        # The 'algorithm' for working out how big to make this buffer is from
        # the Brotli source code, brotlimodule.cc.
        original_output_size = int(
            math.ceil(len(data) + (len(data) >> 2) + 10240)
        )
        available_out = ffi.new("size_t *")
        available_out[0] = original_output_size
        output_buffer = ffi.new("uint8_t []", available_out[0])
        ptr_to_output_buffer = ffi.new("uint8_t **", output_buffer)
        input_size = ffi.new("size_t *", len(data))
        input_buffer = ffi.new("uint8_t []", data)
        ptr_to_input_buffer = ffi.new("uint8_t **", input_buffer)

        rc = lib.BrotliEncoderCompressStream(
            self._encoder,
            operation,
            input_size,
            ptr_to_input_buffer,
            available_out,
            ptr_to_output_buffer,
            ffi.NULL
        )
        if rc != lib.BROTLI_TRUE:  # pragma: no cover
            raise Error("Error encountered compressing data.")

        assert not input_size[0]

        size_of_output = original_output_size - available_out[0]
        return ffi.buffer(output_buffer, size_of_output)[:]

    def compress(self, data):
        """
        Incrementally compress more data.

        :param data: A bytestring containing data to compress.
        :returns: A bytestring containing some compressed data. May return the
            empty bytestring if not enough data has been inserted into the
            compressor to create the output yet.
        """
        return self._compress(data, lib.BROTLI_OPERATION_PROCESS)

    def flush(self):
        """
        Flush the compressor. This will emit the remaining output data, but
        will not destroy the compressor. It can be used, for example, to ensure
        that given chunks of content will decompress immediately.
        """
        chunks = []
        chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))

        while lib.BrotliEncoderHasMoreOutput(self._encoder) == lib.BROTLI_TRUE:
            chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))

        return b''.join(chunks)

    def finish(self):
        """
        Finish the compressor. This will emit the remaining output data and
        transition the compressor to a completed state. The compressor cannot
        be used again after this point, and must be replaced.
        """
        chunks = []
        while lib.BrotliEncoderIsFinished(self._encoder) == lib.BROTLI_FALSE:
            chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FINISH))

        return b''.join(chunks)

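As a quick illustration of the streaming interface above, a minimal sketch (the helper name and the chunked input are assumptions, not part of the library):

def compress_chunks(chunks, quality=5):
    # Feed arbitrary byte chunks through one Compressor and finalize the stream.
    compressor = Compressor(quality=quality)
    out = []
    for chunk in chunks:
        out.append(compressor.compress(chunk))  # may be b'' until enough input buffers up
    out.append(compressor.finish())             # flush the remainder and close the stream
    return b''.join(out)
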
class Decompressor(object):
    """
    An object that allows for streaming decompression of Brotli-compressed
    data.

    .. versionchanged:: 0.5.0
       Added ``dictionary`` parameter.

    :param dictionary: A pre-set dictionary for LZ77. Please use this with
        caution: if a dictionary is used for compression, the same dictionary
        **must** be used for decompression!
    :type dictionary: ``bytes``
    """
    _dictionary = None
    _dictionary_size = None

    def __init__(self, dictionary=b''):
        dec = lib.BrotliDecoderCreateInstance(ffi.NULL, ffi.NULL, ffi.NULL)
        self._decoder = ffi.gc(dec, lib.BrotliDecoderDestroyInstance)

        if dictionary:
            self._dictionary = ffi.new("uint8_t []", dictionary)
            self._dictionary_size = len(dictionary)
            lib.BrotliDecoderSetCustomDictionary(
                self._decoder,
                self._dictionary_size,
                self._dictionary
            )

    def decompress(self, data):
        """
        Decompress part of a complete Brotli-compressed string.

        :param data: A bytestring containing Brotli-compressed data.
        :returns: A bytestring containing the decompressed data.
        """
        chunks = []

        available_in = ffi.new("size_t *", len(data))
        in_buffer = ffi.new("uint8_t[]", data)
        next_in = ffi.new("uint8_t **", in_buffer)

        while True:
            # Allocate a buffer that's hopefully overlarge, but if it's not we
            # don't mind: we'll spin around again.
            buffer_size = 5 * len(data)
            available_out = ffi.new("size_t *", buffer_size)
            out_buffer = ffi.new("uint8_t[]", buffer_size)
            next_out = ffi.new("uint8_t **", out_buffer)

            rc = lib.BrotliDecoderDecompressStream(self._decoder,
                                                   available_in,
                                                   next_in,
                                                   available_out,
                                                   next_out,
                                                   ffi.NULL)

            # First, check for errors.
            if rc == lib.BROTLI_DECODER_RESULT_ERROR:
                error_code = lib.BrotliDecoderGetErrorCode(self._decoder)
                error_message = lib.BrotliDecoderErrorString(error_code)
                raise Error(
                    "Decompression error: %s" % ffi.string(error_message)
                )

            # Next, copy the result out.
            chunk = ffi.buffer(out_buffer, buffer_size - available_out[0])[:]
            chunks.append(chunk)

            if rc == lib.BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT:
                assert available_in[0] == 0
                break
            elif rc == lib.BROTLI_DECODER_RESULT_SUCCESS:
                break
            else:
                # It's cool if we need more output, we just loop again.
                assert rc == lib.BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT

        return b''.join(chunks)

    def flush(self):
        """
        Complete the decompression, return whatever data is remaining to be
        decompressed.

        .. deprecated:: 0.4.0

            This method is no longer required, as decompress() will now
            decompress eagerly.

        :returns: A bytestring containing the remaining decompressed data.
        """
        return b''

    def finish(self):
        """
        Finish the decompressor. As the decompressor decompresses eagerly, this
        will never actually emit any data. However, it will potentially throw
        errors if a truncated or damaged data stream has been used.

        Note that, once this method is called, the decompressor is no longer
        safe for further use and must be thrown away.
        """
        assert (
            lib.BrotliDecoderHasMoreOutput(self._decoder) == lib.BROTLI_FALSE
        )
        if lib.BrotliDecoderIsFinished(self._decoder) == lib.BROTLI_FALSE:
            raise Error("Decompression error: incomplete compressed stream.")

        return b''
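A matching round-trip sketch using the two classes defined in this file (the helper name is an assumption); finish() on the decompressor raises if the stream was truncated:

def brotli_roundtrip(data):
    compressor = Compressor()
    compressed = compressor.compress(data) + compressor.finish()

    decompressor = Decompressor()
    plain = decompressor.decompress(compressed)
    decompressor.finish()  # validates that the compressed stream was complete
    return plain
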
224
plugins/translate/brotli/build.py
Normal file
224
plugins/translate/brotli/build.py
Normal file
@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
import sys

from cffi import FFI
ffi = FFI()

libraries = ['libbrotli']
if 'win32' not in str(sys.platform).lower():
    libraries.append('stdc++')


ffi.set_source(
    "_brotli",
    """#include <brotli/decode.h>
       #include <brotli/encode.h>
    """,
    libraries=libraries,
    include_dirs=["libbrotli", "libbrotli/include"]
)

ffi.cdef("""
    /* common/types.h */
    typedef bool BROTLI_BOOL;
    #define BROTLI_TRUE ...
    #define BROTLI_FALSE ...

    /* dec/state.h */
    /* Allocating function pointer. Function MUST return 0 in the case of
       failure. Otherwise it MUST return a valid pointer to a memory region of
       at least size length. Neither items nor size are allowed to be 0.
       opaque argument is a pointer provided by client and could be used to
       bind function to specific object (memory pool). */
    typedef void* (*brotli_alloc_func)(void* opaque, size_t size);

    /* Deallocating function pointer. Function SHOULD be no-op in the case the
       address is 0. */
    typedef void (*brotli_free_func)(void* opaque, void* address);

    /* dec/decode.h */

    typedef enum {
        /* Decoding error, e.g. corrupt input or memory allocation problem */
        BROTLI_DECODER_RESULT_ERROR = 0,
        /* Decoding successfully completed */
        BROTLI_DECODER_RESULT_SUCCESS = 1,
        /* Partially done; should be called again with more input */
        BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT = 2,
        /* Partially done; should be called again with more output */
        BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT = 3
    } BrotliDecoderResult;

    typedef enum {...} BrotliDecoderErrorCode;
    typedef ... BrotliDecoderState;

    /* Creates the instance of BrotliDecoderState and initializes it.
       |alloc_func| and |free_func| MUST be both zero or both non-zero. In the
       case they are both zero, default memory allocators are used. |opaque| is
       passed to |alloc_func| and |free_func| when they are called. */
    BrotliDecoderState* BrotliDecoderCreateInstance(brotli_alloc_func,
                                                    brotli_free_func,
                                                    void *);

    /* Deinitializes and frees BrotliDecoderState instance. */
    void BrotliDecoderDestroyInstance(BrotliDecoderState* state);

    /* Decompresses the data. Supports partial input and output.

       Must be called with an allocated input buffer in |*next_in| and an
       allocated output buffer in |*next_out|. The values |*available_in| and
       |*available_out| must specify the allocated size in |*next_in| and
       |*next_out| respectively.

       After each call, |*available_in| will be decremented by the amount of
       input bytes consumed, and the |*next_in| pointer will be incremented by
       that amount. Similarly, |*available_out| will be decremented by the
       amount of output bytes written, and the |*next_out| pointer will be
       incremented by that amount. |total_out|, if it is not a null-pointer,
       will be set to the number of bytes decompressed since the last state
       initialization.

       Input is never overconsumed, so |next_in| and |available_in| could be
       passed to the next consumer after decoding is complete. */
    BrotliDecoderResult BrotliDecoderDecompressStream(BrotliDecoderState* s,
                                                      size_t* available_in,
                                                      const uint8_t** next_in,
                                                      size_t* available_out,
                                                      uint8_t** next_out,
                                                      size_t* total_out);

    /* Fills the new state with a dictionary for LZ77, warming up the
       ringbuffer, e.g. for custom static dictionaries for data formats.
       Not to be confused with the built-in transformable dictionary of Brotli.
       |size| should be less or equal to 2^24 (16MiB), otherwise the dictionary
       will be ignored. The dictionary must exist in memory until decoding is
       done and is owned by the caller. To use:
        1) Allocate and initialize state with BrotliCreateInstance
        2) Use BrotliSetCustomDictionary
        3) Use BrotliDecompressStream
        4) Clean up and free state with BrotliDestroyState
    */
    void BrotliDecoderSetCustomDictionary(
        BrotliDecoderState* s, size_t size, const uint8_t* dict);

    /* Returns true, if decoder has some unconsumed output.
       Otherwise returns false. */
    BROTLI_BOOL BrotliDecoderHasMoreOutput(const BrotliDecoderState* s);

    /* Returns true, if decoder has already received some input bytes.
       Otherwise returns false. */
    BROTLI_BOOL BrotliDecoderIsUsed(const BrotliDecoderState* s);

    /* Returns true, if decoder is in a state where we reached the end of the
       input and produced all of the output; returns false otherwise. */
    BROTLI_BOOL BrotliDecoderIsFinished(const BrotliDecoderState* s);

    /* Returns detailed error code after BrotliDecompressStream returns
       BROTLI_DECODER_RESULT_ERROR. */
    BrotliDecoderErrorCode BrotliDecoderGetErrorCode(
        const BrotliDecoderState* s);

    const char* BrotliDecoderErrorString(BrotliDecoderErrorCode c);

    /* enc/encode.h */
    typedef ... BrotliEncoderState;

    typedef enum BrotliEncoderParameter {
        BROTLI_PARAM_MODE = 0,
        /* Controls the compression-speed vs compression-density tradeoffs. The
           higher the quality, the slower the compression. Range is 0 to 11. */
        BROTLI_PARAM_QUALITY = 1,
        /* Base 2 logarithm of the sliding window size. Range is 10 to 24. */
        BROTLI_PARAM_LGWIN = 2,
        /* Base 2 logarithm of the maximum input block size. Range is 16 to 24.
           If set to 0, the value will be set based on the quality. */
        BROTLI_PARAM_LGBLOCK = 3
    } BrotliEncoderParameter;

    typedef enum BrotliEncoderMode {
        /* Default compression mode. The compressor does not know anything in
           advance about the properties of the input. */
        BROTLI_MODE_GENERIC = 0,
        /* Compression mode for UTF-8 format text input. */
        BROTLI_MODE_TEXT = 1,
        /* Compression mode used in WOFF 2.0. */
        BROTLI_MODE_FONT = 2
    } BrotliEncoderMode;

    int BROTLI_DEFAULT_QUALITY = 11;
    int BROTLI_DEFAULT_WINDOW = 22;
    #define BROTLI_DEFAULT_MODE ...

    typedef enum BrotliEncoderOperation {
        BROTLI_OPERATION_PROCESS = 0,
        /* Request output stream to flush. Performed when input stream is
           depleted and there is enough space in output stream. */
        BROTLI_OPERATION_FLUSH = 1,
        /* Request output stream to finish. Performed when input stream is
           depleted and there is enough space in output stream. */
        BROTLI_OPERATION_FINISH = 2
    } BrotliEncoderOperation;

    /* Creates the instance of BrotliEncoderState and initializes it.
       |alloc_func| and |free_func| MUST be both zero or both non-zero. In the
       case they are both zero, default memory allocators are used. |opaque| is
       passed to |alloc_func| and |free_func| when they are called. */
    BrotliEncoderState* BrotliEncoderCreateInstance(brotli_alloc_func,
                                                    brotli_free_func,
                                                    void *);

    /* Deinitializes and frees BrotliEncoderState instance. */
    void BrotliEncoderDestroyInstance(BrotliEncoderState* state);

    /* Compresses the data in |input_buffer| into |encoded_buffer|, and sets
       |*encoded_size| to the compressed length.
       BROTLI_DEFAULT_QUALITY, BROTLI_DEFAULT_WINDOW and BROTLI_DEFAULT_MODE
       should be used as |quality|, |lgwin| and |mode| if there are no specific
       requirements to encoder speed and compression ratio.
       If compression fails, |*encoded_size| is set to 0.
       If BrotliEncoderMaxCompressedSize(|input_size|) is not zero, then
       |*encoded_size| is never set to the bigger value.
       Returns false if there was an error and true otherwise. */
    BROTLI_BOOL BrotliEncoderCompress(int quality,
                                      int lgwin,
                                      BrotliEncoderMode mode,
                                      size_t input_size,
                                      const uint8_t* input_buffer,
                                      size_t* encoded_size,
                                      uint8_t* encoded_buffer);

    BROTLI_BOOL BrotliEncoderCompressStream(BrotliEncoderState* s,
                                            BrotliEncoderOperation op,
                                            size_t* available_in,
                                            const uint8_t** next_in,
                                            size_t* available_out,
                                            uint8_t** next_out,
                                            size_t* total_out);

    BROTLI_BOOL BrotliEncoderSetParameter(BrotliEncoderState* state,
                                          BrotliEncoderParameter p,
                                          uint32_t value);

    /* Fills the new state with a dictionary for LZ77, warming up the
       ringbuffer, e.g. for custom static dictionaries for data formats.
       Not to be confused with the built-in transformable dictionary of Brotli.
       To decode, use BrotliSetCustomDictionary() of the decoder with the same
       dictionary. */
    void BrotliEncoderSetCustomDictionary(BrotliEncoderState* state,
                                          size_t size,
                                          const uint8_t* dict);

    /* Check if encoder is in "finished" state, i.e. no more input is
       acceptable and no more output will be produced.
       Works only with BrotliEncoderCompressStream workflow.
       Returns 1 if stream is finished and 0 otherwise. */
    BROTLI_BOOL BrotliEncoderIsFinished(BrotliEncoderState* s);

    /* Check if encoder has more output bytes in internal buffer.
       Works only with BrotliEncoderCompressStream workflow.
       Returns 1 if has more output (in internal buffer) and 0 otherwise. */
    BROTLI_BOOL BrotliEncoderHasMoreOutput(BrotliEncoderState* s);
""")

if __name__ == '__main__':
    ffi.compile()
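The script above uses cffi's out-of-line API mode: ffi.set_source() names the generated extension "_brotli", ffi.cdef() declares the subset of the Brotli C API the wrapper calls, and ffi.compile() builds it against libbrotli. A hedged sketch of how the compiled module is then consumed (the import path is an assumption about the build layout):

# python build.py   # compiles the _brotli extension against libbrotli
from _brotli import ffi, lib  # assumed import path for the generated module

# Minimal smoke test: create and destroy a decoder state through the raw API.
state = lib.BrotliDecoderCreateInstance(ffi.NULL, ffi.NULL, ffi.NULL)
lib.BrotliDecoderDestroyInstance(state)
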
@ -3,6 +3,8 @@ import os
import time
import threading
import requests
import json
from . import brotli

# Lib imports
import gi
@ -141,25 +143,45 @@ class Plugin(PluginBase):
        response = requests.post(tlink, headers=self._headers, data=from_translate)

        if response.status_code == 200:
            data = response.json()
            self._translate_to_buffer.set_text(data["translated"])

            data = self.get_data(response)
            self.translate_tries = 0
            self._translate_to_buffer.set_text(data["translated"])
            if data["detected_language"]:
                self._detected_language_lbl.set_label(f"Detected Language: {data['detected_language']}")
            else:
                self._detected_language_lbl.set_label(f"Selected Language: {self.from_trans}")
        elif response.status_code >= 400 or response.status_code < 500:
            self.get_vqd()
            if not self.translate_tries > 4:
            if not self.translate_tries > 2:
                self._translate()
            else:
                msg = f"Could not translate... Response Code: {response.status_code}"
                self._translate_to_buffer.set_text(msg)

    def get_data(self, response):
        data = None

        try:
            data = response.json()
        except Exception as e:
            ...

        try:
            data = json.loads(response.text)
        except Exception as e:
            ...

        try:
            decompress_str = brotli.decompress(response.content).decode("utf-8")
            data = json.loads(decompress_str)
        except Exception as e:
            ...

        return data

    # NOTE: https://github.com/deedy5/duckduckgo_search/blob/72acb900a346be576f0917dd3d6c0fbd618a71bf/duckduckgo_search/utils.py
    def get_vqd(self):
        response = requests.post(self.vqd_link, headers=self.vqd_headers, data=self.vqd_data, timeout=10)
        response = requests.post(self.vqd_link, headers=self.vqd_headers, data=self.vqd_data, timeout=2)
        if response.status_code == 200:
            data = response.content
            vqd_start_index = data.index(b"vqd='") + 5

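The get_data() fallback chain above ends with a manual Brotli pass for bodies that requests did not decode; a standalone sketch of that last branch (the helper name is an assumption, brotli.decompress and json are the same modules imported above):

def decode_brotli_body(raw_bytes):
    # Decompress a Brotli-encoded response body, then parse the JSON payload.
    text = brotli.decompress(raw_bytes).decode("utf-8")
    return json.loads(text)
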
BIN
src/versions/solarfm-0.0.1/solarfm/..o
Normal file
BIN
src/versions/solarfm-0.0.1/solarfm/..o
Normal file
Binary file not shown.
@ -56,11 +56,12 @@ class Controller(UIMixin, SignalsMixins, Controller_Data):


    def _setup_styling(self):
        ...

    def _setup_signals(self):
        self.window.connect("focus-out-event", self.unset_keys_and_data)
        self.window.connect("key-press-event", self.on_global_key_press_controller)
        self.window.connect("key-release-event", self.on_global_key_release_controller)

    def _setup_signals(self):
        FileSystemActions()

    def _subscribe_to_events(self):

@ -10,6 +10,7 @@ from gi.repository import Gtk

# Application imports
from .widgets.dialogs.message_widget import MessageWidget
from .widgets.dialogs.user_pass_widget import UserPassWidget

from shellfm.windows.controller import WindowController
from plugins.plugins_controller import PluginsController
@ -31,6 +32,7 @@ class State:
    to_copy_files: [] = None
    to_cut_files: [] = None
    message_dialog: type = None
    user_pass_dialog: type = None


class Controller_Data:
@ -90,6 +92,7 @@ class Controller_Data:
        # state.icon_grid = event_system.emit_and_await("get_files_view_icon_grid", (state.wid, state.tid))
        state.store = state.icon_grid.get_model()
        state.message_dialog = MessageWidget()
        state.user_pass_dialog = UserPassWidget()

        selected_files = state.icon_grid.get_selected_items()
        if selected_files:

@ -36,10 +36,10 @@ class FileActionSignalsMixin:
    # NOTE: Too lazy to implement a proper update handler and so just regen store and update tab.
    # Use a lock system to prevent too many update calls for certain instances but user can manually refresh if they have urgency
    def dir_watch_updates(self, file_monitor, file, other_file=None, eve_type=None, data=None):
        if eve_type == Gio.FileMonitorEvent.CHANGES_DONE_HINT:
        if eve_type in [Gio.FileMonitorEvent.CREATED, Gio.FileMonitorEvent.DELETED,
                        Gio.FileMonitorEvent.RENAMED, Gio.FileMonitorEvent.MOVED_IN,
                        Gio.FileMonitorEvent.MOVED_OUT]:
            if settings.is_debug():
                logger.debug(eve_type)

            if eve_type in [Gio.FileMonitorEvent.MOVED_IN, Gio.FileMonitorEvent.MOVED_OUT]:

@ -54,6 +54,9 @@ class TabMixin(GridMixin):

    def close_tab(self, button, eve=None):
        notebook = button.get_parent().get_parent()
        if notebook.get_n_pages() == 1:
            return

        wid = int(notebook.get_name()[-1])
        tid = self.get_id_from_tab_box(button.get_parent())
        scroll = self.builder.get_object(f"{wid}|{tid}")

@ -0,0 +1,63 @@
# Python imports

# Lib imports
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk

# Application imports




class UserPassWidget(Gtk.Dialog):
    """docstring for UserPassWidget."""

    def __init__(self):
        super(UserPassWidget, self).__init__()

        self.user_input = Gtk.Entry()
        self.pass_input = Gtk.Entry()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()


    def _setup_styling(self):
        self.set_modal(True)
        self.set_type_hint(Gdk.WindowTypeHint.DIALOG)

        self.user_input.set_placeholder_text("User:")
        self.pass_input.set_placeholder_text("Password:")
        self.pass_input.set_visibility(False)

    def _setup_signals(self):
        # self.connect("response", self.on_response)
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        vbox = self.get_content_area()
        label = Gtk.Label(label="User & Password")
        vbox.add(label)
        vbox.add(self.user_input)
        vbox.add(self.pass_input)
        vbox.show_all()

        self.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                         "Skip Input", Gtk.ResponseType.CLOSE,
                         "Submit Input", Gtk.ResponseType.OK
        )

    def do_response(self, response_id):
        self.hide()
        return response_id

    # def on_response(self, widget, response_id):
    #     self.hide()
    #     return response_id
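A hedged sketch of driving the dialog above from a caller (the helper name is an assumption): Gtk.Dialog.run() blocks until a button is pressed and do_response() hides the widget, so the entries can be read afterwards.

def prompt_user_pass(dialog):
    # dialog is a UserPassWidget instance owned by the caller.
    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        return dialog.user_input.get_text(), dialog.pass_input.get_text()
    return None, None
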
@ -52,10 +52,11 @@ class TabMixin(GridMixin):
        self.set_file_watcher(tab)




    def close_tab(self, button, eve=None):
        notebook = button.get_parent().get_parent()
        if notebook.get_n_pages() == 1:
            return

        wid = int(notebook.get_name()[-1])
        tid = self.get_id_from_tab_box(button.get_parent())
        scroll = self.builder.get_object(f"{wid}|{tid}")

@ -10,7 +10,7 @@ from gi.repository import Gtk



class TabHeaderWidget(Gtk.ButtonBox):
class TabHeaderWidget(Gtk.Box):
    """docstring for TabHeaderWidget"""

    def __init__(self, tab, close_tab):
@ -26,6 +26,7 @@ class TabHeaderWidget(Gtk.ButtonBox):

    def _setup_styling(self):
        self.set_orientation(0)
        self.set_hexpand(False)

    def _setup_signals(self):
        ...
@ -39,6 +40,9 @@ class TabHeaderWidget(Gtk.ButtonBox):
        label.set_label(f"{self._tab.get_end_of_path()}")
        label.set_width_chars(len(self._tab.get_end_of_path()))
        label.set_xalign(0.0)
        label.set_margin_left(25)
        label.set_margin_right(25)
        label.set_hexpand(True)
        tid.set_label(f"{self._tab.get_id()}")

        close.connect("released", self._close_tab)

@ -10,6 +10,7 @@ from os.path import isdir
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Gio

# Application imports
@ -52,6 +53,7 @@ class PluginsController:
                              Gio.FileMonitorEvent.MOVED_OUT]:
            self.reload_plugins(file)

    @daemon_threaded
    def load_plugins(self, file: str = None) -> None:
        logger.info(f"Loading plugins...")
        parent_path = os.getcwd()
@ -66,7 +68,9 @@ class PluginsController:

                plugin, loading_data = manifest.get_loading_data()
                module = self.load_plugin_module(path, folder, target)
                self.execute_plugin(module, plugin, loading_data)

                GLib.idle_add(self.execute_plugin, *(module, plugin, loading_data))
                # self.execute_plugin(module, plugin, loading_data)
            except InvalidPluginException as e:
                logger.info(f"Malformed Plugin: Not loading -->: '{folder}' !")
                logger.debug("Trace: ", traceback.print_exc())

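The change above hands execute_plugin() to GLib.idle_add so it runs on the GTK main loop instead of the loader thread; a minimal sketch of that pattern (names are assumptions):

from gi.repository import GLib

def run_once_on_main_loop(callback, *args):
    def _wrapper():
        callback(*args)
        return False  # returning False removes the idle source after one run
    GLib.idle_add(_wrapper)
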
@ -49,6 +49,8 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
                thumbnl = self.find_thumbnail_from_desktop_file(full_path)

            if not thumbnl:
                # TODO: Detect if not in a thread and use directly for speed get_system_thumbnail
                # thumbnl = self.get_system_thumbnail(full_path, full_path, self.sys_icon_wh[0])
                thumbnl = self._get_system_thumbnail_gtk_thread(full_path, self.sys_icon_wh[0])
                if not thumbnl:
                    raise IconException("No known icons found.")
@ -110,7 +112,13 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
            elif full_path.lower().endswith(".webp") and PImage:
                return self.image2pixbuf(full_path, wxh)

            return GdkPixbuf.Pixbuf.new_from_file_at_scale(full_path, wxh[0], wxh[1], True)
            pixbuf = None
            try:
                pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(full_path, wxh[0], wxh[1], True)
            except Exception as e:
                ...

            return pixbuf
        except IconException as e:
            print("Image Scaling Issue:")
            print( repr(e) )

@ -3,11 +3,11 @@
# Lib imports

# Application imports
from .singleton import Singleton




class EndpointRegistry():
class EndpointRegistry(Singleton):
    def __init__(self):
        self._endpoints = {}

@ -4,11 +4,11 @@ from collections import defaultdict
# Lib imports

# Application imports
from .singleton import Singleton




class EventSystem:
class EventSystem(Singleton):
    """ Create event system. """

    def __init__(self):

@ -8,11 +8,11 @@ from multiprocessing.connection import Listener
from gi.repository import GLib

# Application imports
from .singleton import Singleton




class IPCServer:
class IPCServer(Singleton):
    """ Create a listener so that other SolarFM instances send requests back to existing instance. """
    def __init__(self, ipc_address: str = '127.0.0.1', conn_type: str = "socket"):
        self.is_ipc_alive = False

@ -7,20 +7,20 @@ gi.require_version('Gdk', '3.0')
from gi.repository import Gdk

# Application imports
from .singleton import Singleton



def err(log = ""):
    """Print an error message"""
def logger(log = ""):
    print(log)


class KeymapError(Exception):
    """Custom exception for errors in keybinding configurations"""
    """ Custom exception for errors in keybinding configurations """

MODIFIER = re.compile('<([^<]+)>')
class Keybindings:
    """Class to handle loading and lookup of Terminator keybindings"""
class Keybindings(Singleton):
    """ Class to handle loading and lookup of Terminator keybindings """

    modifiers = {
        'ctrl': Gdk.ModifierType.CONTROL_MASK,
@ -46,7 +46,7 @@ class Keybindings:
        print(self.keys)

    def append_bindings(self, combos):
        """Accept new binding(s) and reload"""
        """ Accept new binding(s) and reload """
        for item in combos:
            method, keys = item.split(":")
            self.keys[method] = keys
@ -54,12 +54,12 @@ class Keybindings:
        self.reload()

    def configure(self, bindings):
        """Accept new bindings and reconfigure with them"""
        """ Accept new bindings and reconfigure with them """
        self.keys = bindings
        self.reload()

    def reload(self):
        """Parse bindings and mangle into an appropriate form"""
        """ Parse bindings and mangle into an appropriate form """
        self._lookup = {}
        self._masks = 0

@ -77,9 +77,9 @@ class Keybindings:
            try:
                keyval, mask = self._parsebinding(binding)
                # Does much the same, but with poorer error handling.
                #keyval, mask = Gtk.accelerator_parse(binding)
                # keyval, mask = Gtk.accelerator_parse(binding)
            except KeymapError as e:
                err ("keybinding reload failed to parse binding '%s': %s" % (binding, e))
                logger(f"Keybinding reload failed to parse binding '{binding}': {e}")
            else:
                if mask & Gdk.ModifierType.SHIFT_MASK:
                    if keyval == Gdk.KEY_Tab:
@ -98,7 +98,7 @@ class Keybindings:
            self._masks |= mask

    def _parsebinding(self, binding):
        """Parse an individual binding using Gtk's binding function"""
        """ Parse an individual binding using Gtk's binding function """
        mask = 0
        modifiers = re.findall(MODIFIER, binding)

@ -113,25 +113,25 @@ class Keybindings:
        keyval = Gdk.keyval_from_name(key)

        if keyval == 0:
            raise KeymapError("Key '%s' is unrecognised..." % key)
            raise KeymapError(f"Key '{key}' is unrecognised...")
        return (keyval, mask)

    def _lookup_modifier(self, modifier):
        """Map modifier names to gtk values"""
        """ Map modifier names to gtk values """
        try:
            return self.modifiers[modifier.lower()]
        except KeyError:
            raise KeymapError("Unhandled modifier '<%s>'" % modifier)
            raise KeymapError(f"Unhandled modifier '<{modifier}>'")

    def lookup(self, event):
        """Translate a keyboard event into a mapped key"""
        """ Translate a keyboard event into a mapped key """
        try:
            _found, keyval, _egp, _lvl, consumed = self.keymap.translate_keyboard_state(
                event.hardware_keycode,
                Gdk.ModifierType(event.get_state() & ~Gdk.ModifierType.LOCK_MASK),
                event.group)
        except TypeError:
            err ("Keybinding lookup failed to translate keyboard event: %s" % dir(event))
            logger(f"Keybinding lookup failed to translate keyboard event: {dir(event)}")
            return None

        mask = (event.get_state() & ~consumed) & self._masks

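The commented-out Gtk.accelerator_parse alternative above covers the same ground as _parsebinding() but reports failures as (0, 0) rather than raising; a small sketch of that behavior (the binding strings are illustrative):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

keyval, mods = Gtk.accelerator_parse("<Control>r")        # keyval for 'r', CONTROL_MASK set
bad_keyval, bad_mods = Gtk.accelerator_parse("<Bogus>zz")  # (0, 0): no exception to catch
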
@ -3,9 +3,11 @@ import os
import logging

# Application imports
from .singleton import Singleton


class Logger:

class Logger(Singleton):
    """
    Create a new logging object and return it.
    :note:

@ -11,11 +11,12 @@ from gi.repository import Gtk
from gi.repository import GLib

# Application imports
from ..singleton import Singleton
from .start_check_mixin import StartCheckMixin



class Settings(StartCheckMixin):
class Settings(StartCheckMixin, Singleton):
    def __init__(self):
        self._SCRIPT_PTH = os.path.dirname(os.path.realpath(__file__))
        self._USER_HOME = path.expanduser('~')
@ -31,6 +32,12 @@ class Settings(StartCheckMixin):
        self._KEY_BINDINGS_FILE = f"{self._HOME_CONFIG_PATH}/key-bindings.json"
        self._PID_FILE = f"{self._HOME_CONFIG_PATH}/{app_name.lower()}.pid"
        self._WINDOW_ICON = f"{self._DEFAULT_ICONS}/icons/{app_name.lower()}.png"
        self._UI_WIDEGTS_PATH = f"{self._HOME_CONFIG_PATH}/ui_widgets"
        self._CONTEXT_MENU = f"{self._HOME_CONFIG_PATH}/contexct_menu.json"
        self._TRASH_FILES_PATH = f"{GLib.get_user_data_dir()}/Trash/files"
        self._TRASH_INFO_PATH = f"{GLib.get_user_data_dir()}/Trash/info"
        self._ICON_THEME = Gtk.IconTheme.get_default()


        if not os.path.exists(self._HOME_CONFIG_PATH):
            os.mkdir(self._HOME_CONFIG_PATH)
@ -64,23 +71,25 @@ class Settings(StartCheckMixin):
            self._WINDOW_ICON = f"{self._USR_PATH}/icons/{app_name.lower()}.png"
            if not os.path.exists(self._WINDOW_ICON):
                raise MissingConfigError("Unable to find the application icon.")


        if not os.path.exists(self._UI_WIDEGTS_PATH):
            self._UI_WIDEGTS_PATH = f"{self._USR_PATH}/ui_widgets"
        self._CONTEXT_MENU = f"{self._USR_PATH}/contexct_menu.json"
        self._TRASH_FILES_PATH = f"{GLib.get_user_data_dir()}/Trash/files"
        self._TRASH_INFO_PATH = f"{GLib.get_user_data_dir()}/Trash/info"
        self._ICON_THEME = Gtk.IconTheme.get_default()

        if not os.path.exists(self._CONTEXT_MENU):
            self._CONTEXT_MENU = f"{self._USR_PATH}/contexct_menu.json"


        try:
            with open(self._KEY_BINDINGS_FILE) as file:
                bindings = json.load(file)["keybindings"]
                keybindings.configure(bindings)
        except Exception as e:
            print( f"Settings: {self._KEY_BINDINGS_FILE}\n\t\t{repr(e)}" )

        try:
            with open(self._CONTEXT_MENU) as file:
                self._context_menu_data = json.load(file)
        except Exception as e:
            print( f"Settings: {self._CONTEXT_MENU}\n\t\t{repr(e)}" )


        self._main_window = None
        self._main_window_w = 1670

23
src/versions/solarfm-0.0.1/solarfm/utils/singleton.py
Normal file
23
src/versions/solarfm-0.0.1/solarfm/utils/singleton.py
Normal file
@ -0,0 +1,23 @@
# Python imports

# Lib imports

# Application imports


class SingletonError(Exception):
    pass



class Singleton:
    ccount = 0

    def __new__(cls, *args, **kwargs):
        obj = super(Singleton, cls).__new__(cls)
        cls.ccount += 1

        if cls.ccount == 2:
            raise SingletonError(f"Exceeded {cls.__name__} instantiation limit...")

        return obj
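A short sketch of the intended use of the new class above: inherit from Singleton, and the second instantiation of the subclass raises SingletonError (the subclass name here is an assumption):

class AppRegistry(Singleton):
    def __init__(self):
        self.entries = {}

first = AppRegistry()       # ccount for AppRegistry becomes 1
try:
    second = AppRegistry()  # ccount reaches 2 and __new__ raises
except SingletonError as e:
    print(e)
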
@ -6,7 +6,7 @@
    "open_terminal" : "F4",
    "refresh_tab" : ["F5",
                     "<Control>r"],
    "tggl_top_main_menubar" : "<Alt>h",
    "tggl_top_main_menubar" : "<Alt>Alt_L",
    "tear_down" : "<Control>q",
    "go_up" : "<Control>Up",
    "go_home" : "<Control>slash",
