mirror of
https://github.com/junegunn/fzf.git
synced 2025-11-15 23:03:47 -05:00
Compare commits
41 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e89eebb7ba | ||
|
|
fee404399a | ||
|
|
6b4805ca1a | ||
|
|
159699b5d7 | ||
|
|
af809c9661 | ||
|
|
329de8f416 | ||
|
|
e825b07e85 | ||
|
|
71fdb99a07 | ||
|
|
55ee4186aa | ||
|
|
941b0a0ff7 | ||
|
|
6aae12288e | ||
|
|
302cc552ef | ||
|
|
a2a4df0886 | ||
|
|
3399e39968 | ||
|
|
87874bba88 | ||
|
|
c304fc4333 | ||
|
|
6977cf268f | ||
|
|
931c78a70c | ||
|
|
8d23646fe6 | ||
|
|
656963e018 | ||
|
|
644277faf1 | ||
|
|
0558dfee79 | ||
|
|
487c8fe88f | ||
|
|
0d171ba1d8 | ||
|
|
2069bbc8b5 | ||
|
|
053d628b53 | ||
|
|
6bc592e6c9 | ||
|
|
6c76d8cd1c | ||
|
|
a09e411936 | ||
|
|
02a7b96f33 | ||
|
|
e55e029ae8 | ||
|
|
6b18b144cf | ||
|
|
6d53089cc1 | ||
|
|
e85a8a68d0 | ||
|
|
dc55e68524 | ||
|
|
462c68b625 | ||
|
|
999d374f0c | ||
|
|
b208aa675e | ||
|
|
2b98fee136 | ||
|
|
e5e75efebc | ||
|
|
4a4fef2daf |
@@ -1,19 +1,18 @@
|
||||
language: ruby
|
||||
dist: trusty
|
||||
sudo: required
|
||||
matrix:
|
||||
include:
|
||||
- env: TAGS=
|
||||
rvm: 2.3.3
|
||||
# - env: TAGS=tcell
|
||||
# rvm: 2.2.0
|
||||
# rvm: 2.3.3
|
||||
|
||||
install:
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y libncurses-dev lib32ncurses5-dev libgpm-dev
|
||||
- sudo add-apt-repository -y ppa:pi-rho/dev
|
||||
- sudo apt-add-repository -y ppa:fish-shell/release-2
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y tmux=1.9a-1~ppa1~p
|
||||
- sudo apt-get install -y zsh fish
|
||||
- sudo apt-get install -y tmux zsh fish
|
||||
|
||||
script: |
|
||||
make test install &&
|
||||
|
||||
@@ -1,6 +1,13 @@
|
||||
CHANGELOG
|
||||
=========
|
||||
|
||||
0.17.0
|
||||
------
|
||||
- Performance optimization
|
||||
- One can match literal spaces in extended-search mode with a space prepended
|
||||
by a backslash.
|
||||
- `--expect` is now additive and can be specified multiple times.
|
||||
|
||||
0.16.11
|
||||
-------
|
||||
- Performance optimization
|
||||
|
||||
@@ -55,6 +55,19 @@ let g:fzf_action = {
|
||||
\ 'ctrl-x': 'split',
|
||||
\ 'ctrl-v': 'vsplit' }
|
||||
|
||||
" An action can be a reference to a function that processes selected lines
|
||||
function! s:build_quickfix_list(lines)
|
||||
call setqflist(map(copy(a:lines), '{ "filename": v:val }'))
|
||||
copen
|
||||
cc
|
||||
endfunction
|
||||
|
||||
let g:fzf_action = {
|
||||
\ 'ctrl-q': function('s:build_quickfix_list'),
|
||||
\ 'ctrl-t': 'tab split',
|
||||
\ 'ctrl-x': 'split',
|
||||
\ 'ctrl-v': 'vsplit' }
|
||||
|
||||
" Default fzf layout
|
||||
" - down / up / left / right
|
||||
let g:fzf_layout = { 'down': '~40%' }
|
||||
|
||||
@@ -24,7 +24,7 @@ Table of Contents
|
||||
|
||||
* [Installation](#installation)
|
||||
* [Using git](#using-git)
|
||||
* [Using Homebrew](#using-homebrew)
|
||||
* [Using Homebrew or Linuxbrew](#using-homebrew-or-linuxbrew)
|
||||
* [As Vim plugin](#as-vim-plugin)
|
||||
* [Windows](#windows)
|
||||
* [Upgrading fzf](#upgrading-fzf)
|
||||
@@ -83,9 +83,10 @@ git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
|
||||
~/.fzf/install
|
||||
```
|
||||
|
||||
### Using Homebrew
|
||||
### Using Homebrew or Linuxbrew
|
||||
|
||||
On OS X, you can use [Homebrew](http://brew.sh/) to install fzf.
|
||||
Alternatively, you can use [Homebrew](http://brew.sh/) or
|
||||
[Linuxbrew](http://linuxbrew.sh/) to install fzf.
|
||||
|
||||
```sh
|
||||
brew install fzf
|
||||
@@ -411,7 +412,7 @@ options that affect the performance.
|
||||
algorithm. However, this algorithm is not guaranteed to find the optimal
|
||||
ordering of the matches and is not recommended.
|
||||
|
||||
[perf]: https://junegunn.kr/images/fzf-0.16.10.png
|
||||
[perf]: https://junegunn.kr/images/fzf-0.16.11.png
|
||||
|
||||
### Executing external programs
|
||||
|
||||
|
||||
14
bin/fzf-tmux
14
bin/fzf-tmux
@@ -146,6 +146,7 @@ cleanup() {
|
||||
fi
|
||||
|
||||
if [ $# -gt 0 ]; then
|
||||
trap - EXIT
|
||||
exit 130
|
||||
fi
|
||||
}
|
||||
@@ -170,21 +171,22 @@ for arg in "${args[@]}"; do
|
||||
done
|
||||
|
||||
pppid=$$
|
||||
trap_set="trap 'kill -SIGUSR1 -$pppid' EXIT SIGINT SIGTERM"
|
||||
trap_unset="trap - EXIT SIGINT SIGTERM"
|
||||
echo -n "trap 'kill -SIGUSR1 -$pppid' EXIT SIGINT SIGTERM;" > $argsf
|
||||
close="; trap - EXIT SIGINT SIGTERM $close"
|
||||
|
||||
if [[ -n "$term" ]] || [[ -t 0 ]]; then
|
||||
cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
||||
cat <<< "\"$fzf\" $opts > $fifo2; echo \$? > $fifo3 $close" >> $argsf
|
||||
cat $argsf
|
||||
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||
set-window-option remain-on-exit off \;\
|
||||
split-window $opt "$trap_set;cd $(printf %q "$PWD");$envs bash $argsf;$trap_unset" $swap \
|
||||
split-window $opt "cd $(printf %q "$PWD");$envs bash $argsf" $swap \
|
||||
> /dev/null 2>&1
|
||||
else
|
||||
mkfifo $fifo1
|
||||
cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" > $argsf
|
||||
cat <<< "\"$fzf\" $opts < $fifo1 > $fifo2; echo \$? > $fifo3 $close" >> $argsf
|
||||
TMUX=$(echo $TMUX | cut -d , -f 1,2) tmux set-window-option synchronize-panes off \;\
|
||||
set-window-option remain-on-exit off \;\
|
||||
split-window $opt "$trap_set;$envs bash $argsf;$trap_unset" $swap \
|
||||
split-window $opt "$envs bash $argsf" $swap \
|
||||
> /dev/null 2>&1
|
||||
cat <&0 > $fifo1 &
|
||||
fi
|
||||
|
||||
15
doc/fzf.txt
15
doc/fzf.txt
@@ -1,4 +1,4 @@
|
||||
fzf.txt fzf Last change: April 28 2017
|
||||
fzf.txt fzf Last change: August 14 2017
|
||||
FZF - TABLE OF CONTENTS *fzf* *fzf-toc*
|
||||
==============================================================================
|
||||
|
||||
@@ -80,6 +80,19 @@ Examples~
|
||||
\ 'ctrl-x': 'split',
|
||||
\ 'ctrl-v': 'vsplit' }
|
||||
|
||||
" An action can be a reference to a function that processes selected lines
|
||||
function! s:build_quickfix_list(lines)
|
||||
call setqflist(map(copy(a:lines), '{ "filename": v:val }'))
|
||||
copen
|
||||
cc
|
||||
endfunction
|
||||
|
||||
let g:fzf_action = {
|
||||
\ 'ctrl-q': function('s:build_quickfix_list'),
|
||||
\ 'ctrl-t': 'tab split',
|
||||
\ 'ctrl-x': 'split',
|
||||
\ 'ctrl-v': 'vsplit' }
|
||||
|
||||
" Default fzf layout
|
||||
" - down / up / left / right
|
||||
let g:fzf_layout = { 'down': '~40%' }
|
||||
|
||||
3
install
3
install
@@ -2,7 +2,7 @@
|
||||
|
||||
set -u
|
||||
|
||||
version=0.16.11
|
||||
version=0.17.0
|
||||
auto_completion=
|
||||
key_bindings=
|
||||
update_config=2
|
||||
@@ -172,6 +172,7 @@ case "$archi" in
|
||||
OpenBSD\ *86) download fzf-$version-openbsd_${binary_arch:-386}.tgz ;;
|
||||
CYGWIN*\ *64) download fzf-$version-windows_${binary_arch:-amd64}.zip ;;
|
||||
MINGW*\ *86) download fzf-$version-windows_${binary_arch:-386}.zip ;;
|
||||
MINGW*\ *64) download fzf-$version-windows_${binary_arch:-amd64}.zip ;;
|
||||
*) binary_available=0 binary_error=1 ;;
|
||||
esac
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
..
|
||||
.TH fzf-tmux 1 "Aug 2017" "fzf 0.16.11" "fzf-tmux - open fzf in tmux split pane"
|
||||
.TH fzf-tmux 1 "Aug 2017" "fzf 0.17.0" "fzf-tmux - open fzf in tmux split pane"
|
||||
|
||||
.SH NAME
|
||||
fzf-tmux - open fzf in tmux split pane
|
||||
|
||||
@@ -21,7 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
..
|
||||
.TH fzf 1 "Aug 2017" "fzf 0.16.11" "fzf - a command-line fuzzy finder"
|
||||
.TH fzf 1 "Aug 2017" "fzf 0.17.0" "fzf - a command-line fuzzy finder"
|
||||
|
||||
.SH NAME
|
||||
fzf - a command-line fuzzy finder
|
||||
@@ -331,10 +331,12 @@ Comma-separated list of keys that can be used to complete fzf in addition to
|
||||
the default enter key. When this option is set, fzf will print the name of the
|
||||
key pressed as the first line of its output (or as the second line if
|
||||
\fB--print-query\fR is also used). The line will be empty if fzf is completed
|
||||
with the default enter key.
|
||||
with the default enter key. If \fB--expect\fR option is specified multiple
|
||||
times, fzf will expect the union of the keys. \fB--no-expect\fR will clear the
|
||||
list.
|
||||
|
||||
.RS
|
||||
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s,f1,f2,~,@\fR
|
||||
e.g. \fBfzf --expect=ctrl-v,ctrl-t,alt-s --expect=f1,f2,~,@\fR
|
||||
.RE
|
||||
.TP
|
||||
.B "--read0"
|
||||
@@ -410,6 +412,9 @@ Unless specified otherwise, fzf will start in "extended-search mode". In this
|
||||
mode, you can specify multiple patterns delimited by spaces, such as: \fB'wild
|
||||
^music .mp3$ sbtrkt !rmx\fR
|
||||
|
||||
You can prepend a backslash to a space (\fB\\ \fR) to match a literal space
|
||||
character.
|
||||
|
||||
.SS Exact-match (quoted)
|
||||
A term that is prefixed by a single-quote character (\fB'\fR) is interpreted as
|
||||
an "exact-match" (or "non-fuzzy") term. fzf will search for the exact
|
||||
|
||||
@@ -66,8 +66,8 @@ function! s:shellesc_cmd(arg)
|
||||
let escaped = substitute(a:arg, '[&|<>()@^]', '^&', 'g')
|
||||
let escaped = substitute(escaped, '%', '%%', 'g')
|
||||
let escaped = substitute(escaped, '"', '\\^&', 'g')
|
||||
let escaped = substitute(escaped, '\\\+\(\\^\)', '\\\\\1', 'g')
|
||||
return '^"'.substitute(escaped, '[^\\]\zs\\$', '\\\\', '').'^"'
|
||||
let escaped = substitute(escaped, '\(\\\+\)\(\\^\)', '\1\1\2', 'g')
|
||||
return '^"'.substitute(escaped, '\(\\\+\)$', '\1\1', '').'^"'
|
||||
endfunction
|
||||
|
||||
function! fzf#shellescape(arg, ...)
|
||||
@@ -201,7 +201,10 @@ function! s:common_sink(action, lines) abort
|
||||
return
|
||||
endif
|
||||
let key = remove(a:lines, 0)
|
||||
let cmd = get(a:action, key, 'e')
|
||||
let Cmd = get(a:action, key, 'e')
|
||||
if type(Cmd) == type(function('call'))
|
||||
return Cmd(a:lines)
|
||||
endif
|
||||
if len(a:lines) > 1
|
||||
augroup fzf_swap
|
||||
autocmd SwapExists * let v:swapchoice='o'
|
||||
@@ -217,7 +220,7 @@ function! s:common_sink(action, lines) abort
|
||||
execute 'e' s:escape(item)
|
||||
let empty = 0
|
||||
else
|
||||
call s:open(cmd, item)
|
||||
call s:open(Cmd, item)
|
||||
endif
|
||||
if !has('patch-8.0.0177') && !has('nvim-0.2') && exists('#BufEnter')
|
||||
\ && isdirectory(item)
|
||||
@@ -337,13 +340,6 @@ try
|
||||
set shell=sh
|
||||
endif
|
||||
|
||||
if has('nvim')
|
||||
let running = filter(range(1, bufnr('$')), "bufname(v:val) =~# ';#FZF'")
|
||||
if len(running)
|
||||
call s:warn('FZF is already running (in buffer '.join(running, ', ').')!')
|
||||
return []
|
||||
endif
|
||||
endif
|
||||
let dict = exists('a:1') ? s:upgrade(a:1) : {}
|
||||
let temps = { 'result': s:fzf_tempname() }
|
||||
let optstr = s:evaluate_opts(get(dict, 'options', ''))
|
||||
@@ -466,11 +462,11 @@ augroup fzf_popd
|
||||
augroup END
|
||||
|
||||
function! s:dopopd()
|
||||
if !exists('w:fzf_prev_dir') || exists('*haslocaldir') && !haslocaldir()
|
||||
if !exists('w:fzf_dir') || s:fzf_getcwd() != w:fzf_dir[1]
|
||||
return
|
||||
endif
|
||||
execute 'lcd' s:escape(w:fzf_prev_dir)
|
||||
unlet w:fzf_prev_dir
|
||||
execute 'lcd' s:escape(w:fzf_dir[0])
|
||||
unlet w:fzf_dir
|
||||
endfunction
|
||||
|
||||
function! s:xterm_launcher()
|
||||
@@ -719,7 +715,7 @@ function! s:callback(dict, lines) abort
|
||||
let popd = has_key(a:dict, 'prev_dir') &&
|
||||
\ (!&autochdir || (empty(a:lines) || len(a:lines) == 1 && empty(a:lines[0])))
|
||||
if popd
|
||||
let w:fzf_prev_dir = a:dict.prev_dir
|
||||
let w:fzf_dir = [a:dict.prev_dir, a:dict.dir]
|
||||
endif
|
||||
|
||||
try
|
||||
@@ -743,7 +739,7 @@ function! s:callback(dict, lines) abort
|
||||
|
||||
" We may have opened a new window or tab
|
||||
if popd
|
||||
let w:fzf_prev_dir = a:dict.prev_dir
|
||||
let w:fzf_dir = [a:dict.prev_dir, a:dict.dir]
|
||||
call s:dopopd()
|
||||
endif
|
||||
endfunction
|
||||
|
||||
287
src/algo/algo.go
287
src/algo/algo.go
@@ -158,27 +158,17 @@ func posArray(withPos bool, len int) *[]int {
|
||||
return nil
|
||||
}
|
||||
|
||||
func alloc16(offset int, slab *util.Slab, size int, clear bool) (int, []int16) {
|
||||
func alloc16(offset int, slab *util.Slab, size int) (int, []int16) {
|
||||
if slab != nil && cap(slab.I16) > offset+size {
|
||||
slice := slab.I16[offset : offset+size]
|
||||
if clear {
|
||||
for idx := range slice {
|
||||
slice[idx] = 0
|
||||
}
|
||||
}
|
||||
return offset + size, slice
|
||||
}
|
||||
return offset, make([]int16, size)
|
||||
}
|
||||
|
||||
func alloc32(offset int, slab *util.Slab, size int, clear bool) (int, []int32) {
|
||||
func alloc32(offset int, slab *util.Slab, size int) (int, []int32) {
|
||||
if slab != nil && cap(slab.I32) > offset+size {
|
||||
slice := slab.I32[offset : offset+size]
|
||||
if clear {
|
||||
for idx := range slice {
|
||||
slice[idx] = 0
|
||||
}
|
||||
}
|
||||
return offset + size, slice
|
||||
}
|
||||
return offset, make([]int32, size)
|
||||
@@ -229,7 +219,7 @@ func bonusFor(prevClass charClass, class charClass) int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func bonusAt(input util.Chars, idx int) int16 {
|
||||
func bonusAt(input *util.Chars, idx int) int16 {
|
||||
if idx == 0 {
|
||||
return bonusBoundary
|
||||
}
|
||||
@@ -251,7 +241,7 @@ func normalizeRune(r rune) rune {
|
||||
// Algo functions make two assumptions
|
||||
// 1. "pattern" is given in lowercase if "caseSensitive" is false
|
||||
// 2. "pattern" is already normalized if "normalize" is true
|
||||
type Algo func(caseSensitive bool, normalize bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||
type Algo func(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int)
|
||||
|
||||
func trySkip(input *util.Chars, caseSensitive bool, b byte, from int) int {
|
||||
byteArray := input.Bytes()[from:]
|
||||
@@ -263,8 +253,11 @@ func trySkip(input *util.Chars, caseSensitive bool, b byte, from int) int {
|
||||
// We may need to search for the uppercase letter again. We don't have to
|
||||
// consider normalization as we can be sure that this is an ASCII string.
|
||||
if !caseSensitive && b >= 'a' && b <= 'z' {
|
||||
if idx > 0 {
|
||||
byteArray = byteArray[:idx]
|
||||
}
|
||||
uidx := bytes.IndexByte(byteArray, b-32)
|
||||
if idx < 0 || uidx >= 0 && uidx < idx {
|
||||
if uidx >= 0 {
|
||||
idx = uidx
|
||||
}
|
||||
}
|
||||
@@ -309,7 +302,43 @@ func asciiFuzzyIndex(input *util.Chars, pattern []rune, caseSensitive bool) int
|
||||
return firstIdx
|
||||
}
|
||||
|
||||
func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func debugV2(T []rune, pattern []rune, F []int32, lastIdx int, H []int16, C []int16) {
|
||||
width := lastIdx - int(F[0]) + 1
|
||||
|
||||
for i, f := range F {
|
||||
I := i * width
|
||||
if i == 0 {
|
||||
fmt.Print(" ")
|
||||
for j := int(f); j <= lastIdx; j++ {
|
||||
fmt.Printf(" " + string(T[j]) + " ")
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Print(string(pattern[i]) + " ")
|
||||
for idx := int(F[0]); idx < int(f); idx++ {
|
||||
fmt.Print(" 0 ")
|
||||
}
|
||||
for idx := int(f); idx <= lastIdx; idx++ {
|
||||
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
fmt.Print(" ")
|
||||
for idx, p := range C[I : I+width] {
|
||||
if idx+int(F[0]) < int(F[i]) {
|
||||
p = 0
|
||||
}
|
||||
if p > 0 {
|
||||
fmt.Printf("%2d ", p)
|
||||
} else {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
// Assume that pattern is given in lowercase if case-insensitive.
|
||||
// First check if there's a match and calculate bonus for each position.
|
||||
// If the input string is too long, consider finding the matching chars in
|
||||
@@ -326,174 +355,175 @@ func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.C
|
||||
return FuzzyMatchV1(caseSensitive, normalize, forward, input, pattern, withPos, slab)
|
||||
}
|
||||
|
||||
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||
offset16 := 0
|
||||
offset32 := 0
|
||||
// Bonus point for each position
|
||||
offset16, B := alloc16(offset16, slab, N, false)
|
||||
// The first occurrence of each character in the pattern
|
||||
offset32, F := alloc32(offset32, slab, M, false)
|
||||
// Rune array
|
||||
offset32, T := alloc32(offset32, slab, N, false)
|
||||
|
||||
// Phase 1. Optimized search for ASCII string
|
||||
idx := asciiFuzzyIndex(&input, pattern, caseSensitive)
|
||||
idx := asciiFuzzyIndex(input, pattern, caseSensitive)
|
||||
if idx < 0 {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
// Phase 2. Calculate bonus for each point
|
||||
pidx, lastIdx, prevClass := 0, 0, charNonWord
|
||||
// Reuse pre-allocated integer slice to avoid unnecessary sweeping of garbages
|
||||
offset16 := 0
|
||||
offset32 := 0
|
||||
offset16, H0 := alloc16(offset16, slab, N)
|
||||
offset16, C0 := alloc16(offset16, slab, N)
|
||||
// Bonus point for each position
|
||||
offset16, B := alloc16(offset16, slab, N)
|
||||
// The first occurrence of each character in the pattern
|
||||
offset32, F := alloc32(offset32, slab, M)
|
||||
// Rune array
|
||||
offset32, T := alloc32(offset32, slab, N)
|
||||
input.CopyRunes(T)
|
||||
for ; idx < N; idx++ {
|
||||
char := T[idx]
|
||||
|
||||
// Phase 2. Calculate bonus for each point
|
||||
maxScore, maxScorePos := int16(0), 0
|
||||
pidx, lastIdx := 0, 0
|
||||
pchar0, pchar, prevH0, prevClass, inGap := pattern[0], pattern[0], int16(0), charNonWord, false
|
||||
Tsub := T[idx:]
|
||||
H0sub, C0sub, Bsub := H0[idx:][:len(Tsub)], C0[idx:][:len(Tsub)], B[idx:][:len(Tsub)]
|
||||
for off, char := range Tsub {
|
||||
var class charClass
|
||||
if char <= unicode.MaxASCII {
|
||||
class = charClassOfAscii(char)
|
||||
if !caseSensitive && class == charUpper {
|
||||
char += 32
|
||||
}
|
||||
} else {
|
||||
class = charClassOfNonAscii(char)
|
||||
}
|
||||
|
||||
if !caseSensitive && class == charUpper {
|
||||
if char <= unicode.MaxASCII {
|
||||
char += 32
|
||||
} else {
|
||||
if !caseSensitive && class == charUpper {
|
||||
char = unicode.To(unicode.LowerCase, char)
|
||||
}
|
||||
if normalize {
|
||||
char = normalizeRune(char)
|
||||
}
|
||||
}
|
||||
|
||||
if normalize {
|
||||
char = normalizeRune(char)
|
||||
}
|
||||
|
||||
T[idx] = char
|
||||
B[idx] = bonusFor(prevClass, class)
|
||||
Tsub[off] = char
|
||||
bonus := bonusFor(prevClass, class)
|
||||
Bsub[off] = bonus
|
||||
prevClass = class
|
||||
|
||||
if pidx < M {
|
||||
if char == pattern[pidx] {
|
||||
lastIdx = idx
|
||||
F[pidx] = int32(idx)
|
||||
if char == pchar {
|
||||
if pidx < M {
|
||||
F[pidx] = int32(idx + off)
|
||||
pidx++
|
||||
pchar = pattern[util.Min(pidx, M-1)]
|
||||
}
|
||||
} else {
|
||||
if char == pattern[M-1] {
|
||||
lastIdx = idx
|
||||
}
|
||||
lastIdx = idx + off
|
||||
}
|
||||
|
||||
if char == pchar0 {
|
||||
score := scoreMatch + bonus*bonusFirstCharMultiplier
|
||||
H0sub[off] = score
|
||||
C0sub[off] = 1
|
||||
if M == 1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||
maxScore, maxScorePos = score, idx+off
|
||||
if forward && bonus == bonusBoundary {
|
||||
break
|
||||
}
|
||||
}
|
||||
inGap = false
|
||||
} else {
|
||||
if inGap {
|
||||
H0sub[off] = util.Max16(prevH0+scoreGapExtention, 0)
|
||||
} else {
|
||||
H0sub[off] = util.Max16(prevH0+scoreGapStart, 0)
|
||||
}
|
||||
C0sub[off] = 0
|
||||
inGap = true
|
||||
}
|
||||
prevH0 = H0sub[off]
|
||||
}
|
||||
if pidx != M {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
if M == 1 && B[F[0]] == bonusBoundary {
|
||||
p := int(F[0])
|
||||
result := Result{p, p + 1, scoreMatch + bonusBoundary*bonusFirstCharMultiplier}
|
||||
if M == 1 {
|
||||
result := Result{maxScorePos, maxScorePos + 1, int(maxScore)}
|
||||
if !withPos {
|
||||
return result, nil
|
||||
}
|
||||
pos := []int{p}
|
||||
pos := []int{maxScorePos}
|
||||
return result, &pos
|
||||
}
|
||||
|
||||
// Phase 3. Fill in score matrix (H)
|
||||
// Unlike the original algorithm, we do not allow omission.
|
||||
width := lastIdx - int(F[0]) + 1
|
||||
offset16, H := alloc16(offset16, slab, width*M, false)
|
||||
f0 := int(F[0])
|
||||
width := lastIdx - f0 + 1
|
||||
offset16, H := alloc16(offset16, slab, width*M)
|
||||
copy(H, H0[f0:lastIdx+1])
|
||||
|
||||
// Possible length of consecutive chunk at each position.
|
||||
offset16, C := alloc16(offset16, slab, width*M, false)
|
||||
offset16, C := alloc16(offset16, slab, width*M)
|
||||
copy(C, C0[f0:lastIdx+1])
|
||||
|
||||
maxScore, maxScorePos := int16(0), 0
|
||||
for i := 0; i < M; i++ {
|
||||
I := i * width
|
||||
Fsub := F[1:]
|
||||
Psub := pattern[1:][:len(Fsub)]
|
||||
for off, f := range Fsub {
|
||||
f := int(f)
|
||||
pchar := Psub[off]
|
||||
pidx := off + 1
|
||||
row := pidx * width
|
||||
inGap := false
|
||||
for j := int(F[i]); j <= lastIdx; j++ {
|
||||
j0 := j - int(F[0])
|
||||
Tsub := T[f : lastIdx+1]
|
||||
Bsub := B[f:][:len(Tsub)]
|
||||
Csub := C[row+f-f0:][:len(Tsub)]
|
||||
Cdiag := C[row+f-f0-1-width:][:len(Tsub)]
|
||||
Hsub := H[row+f-f0:][:len(Tsub)]
|
||||
Hdiag := H[row+f-f0-1-width:][:len(Tsub)]
|
||||
Hleft := H[row+f-f0-1:][:len(Tsub)]
|
||||
Hleft[0] = 0
|
||||
for off, char := range Tsub {
|
||||
col := off + f
|
||||
var s1, s2, consecutive int16
|
||||
|
||||
if j > int(F[i]) {
|
||||
if inGap {
|
||||
s2 = H[I+j0-1] + scoreGapExtention
|
||||
} else {
|
||||
s2 = H[I+j0-1] + scoreGapStart
|
||||
}
|
||||
if inGap {
|
||||
s2 = Hleft[off] + scoreGapExtention
|
||||
} else {
|
||||
s2 = Hleft[off] + scoreGapStart
|
||||
}
|
||||
|
||||
if pattern[i] == T[j] {
|
||||
var diag int16
|
||||
if i > 0 && j0 > 0 {
|
||||
diag = H[I-width+j0-1]
|
||||
}
|
||||
s1 = diag + scoreMatch
|
||||
b := B[j]
|
||||
if i > 0 {
|
||||
// j > 0 if i > 0
|
||||
consecutive = C[I-width+j0-1] + 1
|
||||
// Break consecutive chunk
|
||||
if b == bonusBoundary {
|
||||
consecutive = 1
|
||||
} else if consecutive > 1 {
|
||||
b = util.Max16(b, util.Max16(bonusConsecutive, B[j-int(consecutive)+1]))
|
||||
}
|
||||
} else {
|
||||
if pchar == char {
|
||||
s1 = Hdiag[off] + scoreMatch
|
||||
b := Bsub[off]
|
||||
consecutive = Cdiag[off] + 1
|
||||
// Break consecutive chunk
|
||||
if b == bonusBoundary {
|
||||
consecutive = 1
|
||||
b *= bonusFirstCharMultiplier
|
||||
} else if consecutive > 1 {
|
||||
b = util.Max16(b, util.Max16(bonusConsecutive, B[col-int(consecutive)+1]))
|
||||
}
|
||||
if s1+b < s2 {
|
||||
s1 += B[j]
|
||||
s1 += Bsub[off]
|
||||
consecutive = 0
|
||||
} else {
|
||||
s1 += b
|
||||
}
|
||||
}
|
||||
C[I+j0] = consecutive
|
||||
Csub[off] = consecutive
|
||||
|
||||
inGap = s1 < s2
|
||||
score := util.Max16(util.Max16(s1, s2), 0)
|
||||
if i == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||
maxScore, maxScorePos = score, j
|
||||
if pidx == M-1 && (forward && score > maxScore || !forward && score >= maxScore) {
|
||||
maxScore, maxScorePos = score, col
|
||||
}
|
||||
H[I+j0] = score
|
||||
Hsub[off] = score
|
||||
}
|
||||
}
|
||||
|
||||
if DEBUG {
|
||||
if i == 0 {
|
||||
fmt.Print(" ")
|
||||
for j := int(F[i]); j <= lastIdx; j++ {
|
||||
fmt.Printf(" " + string(T[j]) + " ")
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Print(string(pattern[i]) + " ")
|
||||
for idx := int(F[0]); idx < int(F[i]); idx++ {
|
||||
fmt.Print(" 0 ")
|
||||
}
|
||||
for idx := int(F[i]); idx <= lastIdx; idx++ {
|
||||
fmt.Printf("%2d ", H[i*width+idx-int(F[0])])
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
fmt.Print(" ")
|
||||
for idx, p := range C[I : I+width] {
|
||||
if idx+int(F[0]) < int(F[i]) {
|
||||
p = 0
|
||||
}
|
||||
fmt.Printf("%2d ", p)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
if DEBUG {
|
||||
debugV2(T, pattern, F, lastIdx, H, C)
|
||||
}
|
||||
|
||||
// Phase 4. (Optional) Backtrace to find character positions
|
||||
pos := posArray(withPos, M)
|
||||
j := int(F[0])
|
||||
j := f0
|
||||
if withPos {
|
||||
i := M - 1
|
||||
j = maxScorePos
|
||||
preferMatch := true
|
||||
for {
|
||||
I := i * width
|
||||
j0 := j - int(F[0])
|
||||
j0 := j - f0
|
||||
s := H[I+j0]
|
||||
|
||||
var s1, s2 int16
|
||||
@@ -522,7 +552,7 @@ func FuzzyMatchV2(caseSensitive bool, normalize bool, forward bool, input util.C
|
||||
}
|
||||
|
||||
// Implement the same sorting criteria as V2
|
||||
func calculateScore(caseSensitive bool, normalize bool, text util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||
func calculateScore(caseSensitive bool, normalize bool, text *util.Chars, pattern []rune, sidx int, eidx int, withPos bool) (int, *[]int) {
|
||||
pidx, score, inGap, consecutive, firstBonus := 0, 0, false, 0, int16(0)
|
||||
pos := posArray(withPos, len(pattern))
|
||||
prevClass := charNonWord
|
||||
@@ -582,10 +612,13 @@ func calculateScore(caseSensitive bool, normalize bool, text util.Chars, pattern
|
||||
}
|
||||
|
||||
// FuzzyMatchV1 performs fuzzy-match
|
||||
func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
pidx := 0
|
||||
sidx := -1
|
||||
@@ -665,7 +698,7 @@ func FuzzyMatchV1(caseSensitive bool, normalize bool, forward bool, text util.Ch
|
||||
// bonus point, instead of stopping immediately after finding the first match.
|
||||
// The solution is much cheaper since there is only one possible alignment of
|
||||
// the pattern.
|
||||
func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
@@ -677,7 +710,7 @@ func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
if asciiFuzzyIndex(&text, pattern, caseSensitive) < 0 {
|
||||
if asciiFuzzyIndex(text, pattern, caseSensitive) < 0 {
|
||||
return Result{-1, -1, 0}, nil
|
||||
}
|
||||
|
||||
@@ -735,7 +768,7 @@ func ExactMatchNaive(caseSensitive bool, normalize bool, forward bool, text util
|
||||
}
|
||||
|
||||
// PrefixMatch performs prefix-match
|
||||
func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
if len(pattern) == 0 {
|
||||
return Result{0, 0, 0}, nil
|
||||
}
|
||||
@@ -762,7 +795,7 @@ func PrefixMatch(caseSensitive bool, normalize bool, forward bool, text util.Cha
|
||||
}
|
||||
|
||||
// SuffixMatch performs suffix-match
|
||||
func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
lenRunes := text.Length()
|
||||
trimmedLen := lenRunes - text.TrailingWhitespaces()
|
||||
if len(pattern) == 0 {
|
||||
@@ -793,7 +826,7 @@ func SuffixMatch(caseSensitive bool, normalize bool, forward bool, text util.Cha
|
||||
}
|
||||
|
||||
// EqualMatch performs equal-match
|
||||
func EqualMatch(caseSensitive bool, normalize bool, forward bool, text util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
func EqualMatch(caseSensitive bool, normalize bool, forward bool, text *util.Chars, pattern []rune, withPos bool, slab *util.Slab) (Result, *[]int) {
|
||||
lenPattern := len(pattern)
|
||||
if text.Length() != lenPattern {
|
||||
return Result{-1, -1, 0}, nil
|
||||
|
||||
@@ -17,7 +17,8 @@ func assertMatch2(t *testing.T, fun Algo, caseSensitive, normalize, forward bool
|
||||
if !caseSensitive {
|
||||
pattern = strings.ToLower(pattern)
|
||||
}
|
||||
res, pos := fun(caseSensitive, normalize, forward, util.ToChars([]byte(input)), []rune(pattern), true, nil)
|
||||
chars := util.ToChars([]byte(input))
|
||||
res, pos := fun(caseSensitive, normalize, forward, &chars, []rune(pattern), true, nil)
|
||||
var start, end int
|
||||
if pos == nil || len(*pos) == 0 {
|
||||
start = res.Start
|
||||
|
||||
30
src/ansi.go
30
src/ansi.go
@@ -73,15 +73,13 @@ func extractColor(str string, state *ansiState, proc func(string, *ansiState) bo
|
||||
runeCount := 0
|
||||
for idx := 0; idx < len(str); {
|
||||
idx += findAnsiStart(str[idx:])
|
||||
|
||||
// No sign of ANSI code
|
||||
if idx == len(str) {
|
||||
break
|
||||
}
|
||||
|
||||
// Make sure that we found an ANSI code
|
||||
offset := ansiRegex.FindStringIndex(str[idx:])
|
||||
if offset == nil {
|
||||
if len(offset) < 2 {
|
||||
idx++
|
||||
continue
|
||||
}
|
||||
@@ -117,22 +115,30 @@ func extractColor(str string, state *ansiState, proc func(string, *ansiState) bo
|
||||
}
|
||||
}
|
||||
|
||||
rest := str[prevIdx:]
|
||||
if len(rest) > 0 {
|
||||
var rest string
|
||||
var trimmed string
|
||||
|
||||
if prevIdx == 0 {
|
||||
// No ANSI code found
|
||||
rest = str
|
||||
trimmed = str
|
||||
} else {
|
||||
rest = str[prevIdx:]
|
||||
output.WriteString(rest)
|
||||
if state != nil {
|
||||
// Update last offset
|
||||
runeCount += utf8.RuneCountInString(rest)
|
||||
(&offsets[len(offsets)-1]).offset[1] = int32(runeCount)
|
||||
}
|
||||
trimmed = output.String()
|
||||
}
|
||||
if len(rest) > 0 && state != nil {
|
||||
// Update last offset
|
||||
runeCount += utf8.RuneCountInString(rest)
|
||||
(&offsets[len(offsets)-1]).offset[1] = int32(runeCount)
|
||||
}
|
||||
if proc != nil {
|
||||
proc(rest, state)
|
||||
}
|
||||
if len(offsets) == 0 {
|
||||
return output.String(), nil, state
|
||||
return trimmed, nil, state
|
||||
}
|
||||
return output.String(), &offsets, state
|
||||
return trimmed, &offsets, state
|
||||
}
|
||||
|
||||
func interpretCode(ansiCode string, prevState *ansiState) *ansiState {
|
||||
|
||||
@@ -4,9 +4,8 @@ import "testing"
|
||||
|
||||
func TestChunkCache(t *testing.T) {
|
||||
cache := NewChunkCache()
|
||||
chunk2 := make(Chunk, chunkSize)
|
||||
chunk1p := &Chunk{}
|
||||
chunk2p := &chunk2
|
||||
chunk2p := &Chunk{count: chunkSize}
|
||||
items1 := []Result{Result{}}
|
||||
items2 := []Result{Result{}, Result{}}
|
||||
cache.Add(chunk1p, "foo", items1)
|
||||
|
||||
@@ -3,16 +3,17 @@ package fzf
|
||||
import "sync"
|
||||
|
||||
// Chunk is a list of Items whose size has the upper limit of chunkSize
|
||||
type Chunk []Item
|
||||
type Chunk struct {
|
||||
items [chunkSize]Item
|
||||
count int
|
||||
}
|
||||
|
||||
// ItemBuilder is a closure type that builds Item object from a pointer to a
|
||||
// string and an integer
|
||||
type ItemBuilder func([]byte, int) Item
|
||||
// ItemBuilder is a closure type that builds Item object from byte array
|
||||
type ItemBuilder func(*Item, []byte) bool
|
||||
|
||||
// ChunkList is a list of Chunks
|
||||
type ChunkList struct {
|
||||
chunks []*Chunk
|
||||
count int
|
||||
mutex sync.Mutex
|
||||
trans ItemBuilder
|
||||
}
|
||||
@@ -21,23 +22,21 @@ type ChunkList struct {
|
||||
func NewChunkList(trans ItemBuilder) *ChunkList {
|
||||
return &ChunkList{
|
||||
chunks: []*Chunk{},
|
||||
count: 0,
|
||||
mutex: sync.Mutex{},
|
||||
trans: trans}
|
||||
}
|
||||
|
||||
func (c *Chunk) push(trans ItemBuilder, data []byte, index int) bool {
|
||||
item := trans(data, index)
|
||||
if item.Nil() {
|
||||
return false
|
||||
func (c *Chunk) push(trans ItemBuilder, data []byte) bool {
|
||||
if trans(&c.items[c.count], data) {
|
||||
c.count++
|
||||
return true
|
||||
}
|
||||
*c = append(*c, item)
|
||||
return true
|
||||
return false
|
||||
}
|
||||
|
||||
// IsFull returns true if the Chunk is full
|
||||
func (c *Chunk) IsFull() bool {
|
||||
return len(*c) == chunkSize
|
||||
return c.count == chunkSize
|
||||
}
|
||||
|
||||
func (cl *ChunkList) lastChunk() *Chunk {
|
||||
@@ -49,7 +48,7 @@ func CountItems(cs []*Chunk) int {
|
||||
if len(cs) == 0 {
|
||||
return 0
|
||||
}
|
||||
return chunkSize*(len(cs)-1) + len(*(cs[len(cs)-1]))
|
||||
return chunkSize*(len(cs)-1) + cs[len(cs)-1].count
|
||||
}
|
||||
|
||||
// Push adds the item to the list
|
||||
@@ -57,17 +56,12 @@ func (cl *ChunkList) Push(data []byte) bool {
|
||||
cl.mutex.Lock()
|
||||
|
||||
if len(cl.chunks) == 0 || cl.lastChunk().IsFull() {
|
||||
newChunk := Chunk(make([]Item, 0, chunkSize))
|
||||
cl.chunks = append(cl.chunks, &newChunk)
|
||||
cl.chunks = append(cl.chunks, &Chunk{})
|
||||
}
|
||||
|
||||
if cl.lastChunk().push(cl.trans, data, cl.count) {
|
||||
cl.count++
|
||||
cl.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
ret := cl.lastChunk().push(cl.trans, data)
|
||||
cl.mutex.Unlock()
|
||||
return false
|
||||
return ret
|
||||
}
|
||||
|
||||
// Snapshot returns immutable snapshot of the ChunkList
|
||||
@@ -75,7 +69,6 @@ func (cl *ChunkList) Snapshot() ([]*Chunk, int) {
|
||||
cl.mutex.Lock()
|
||||
|
||||
ret := make([]*Chunk, len(cl.chunks))
|
||||
count := cl.count
|
||||
copy(ret, cl.chunks)
|
||||
|
||||
// Duplicate the last chunk
|
||||
@@ -85,5 +78,5 @@ func (cl *ChunkList) Snapshot() ([]*Chunk, int) {
|
||||
}
|
||||
|
||||
cl.mutex.Unlock()
|
||||
return ret, count
|
||||
return ret, CountItems(ret)
|
||||
}
|
||||
|
||||
@@ -11,10 +11,9 @@ func TestChunkList(t *testing.T) {
|
||||
// FIXME global
|
||||
sortCriteria = []criterion{byScore, byLength}
|
||||
|
||||
cl := NewChunkList(func(s []byte, i int) Item {
|
||||
chars := util.ToChars(s)
|
||||
chars.Index = int32(i * 2)
|
||||
return Item{text: chars}
|
||||
cl := NewChunkList(func(item *Item, s []byte) bool {
|
||||
item.text = util.ToChars(s)
|
||||
return true
|
||||
})
|
||||
|
||||
// Snapshot
|
||||
@@ -40,11 +39,11 @@ func TestChunkList(t *testing.T) {
|
||||
|
||||
// Check the content of the ChunkList
|
||||
chunk1 := snapshot[0]
|
||||
if len(*chunk1) != 2 {
|
||||
if chunk1.count != 2 {
|
||||
t.Error("Snapshot should contain only two items")
|
||||
}
|
||||
if (*chunk1)[0].text.ToString() != "hello" || (*chunk1)[0].Index() != 0 ||
|
||||
(*chunk1)[1].text.ToString() != "world" || (*chunk1)[1].Index() != 2 {
|
||||
if chunk1.items[0].text.ToString() != "hello" ||
|
||||
chunk1.items[1].text.ToString() != "world" {
|
||||
t.Error("Invalid data")
|
||||
}
|
||||
if chunk1.IsFull() {
|
||||
@@ -67,14 +66,14 @@ func TestChunkList(t *testing.T) {
|
||||
!snapshot[1].IsFull() || snapshot[2].IsFull() || count != chunkSize*2+2 {
|
||||
t.Error("Expected two full chunks and one more chunk")
|
||||
}
|
||||
if len(*snapshot[2]) != 2 {
|
||||
if snapshot[2].count != 2 {
|
||||
t.Error("Unexpected number of items")
|
||||
}
|
||||
|
||||
cl.Push([]byte("hello"))
|
||||
cl.Push([]byte("world"))
|
||||
|
||||
lastChunkCount := len(*snapshot[len(snapshot)-1])
|
||||
lastChunkCount := snapshot[len(snapshot)-1].count
|
||||
if lastChunkCount != 2 {
|
||||
t.Error("Unexpected number of items:", lastChunkCount)
|
||||
}
|
||||
|
||||
@@ -9,14 +9,17 @@ import (
|
||||
|
||||
const (
|
||||
// Current version
|
||||
version = "0.16.11"
|
||||
version = "0.17.0"
|
||||
|
||||
// Core
|
||||
coordinatorDelayMax time.Duration = 100 * time.Millisecond
|
||||
coordinatorDelayStep time.Duration = 10 * time.Millisecond
|
||||
|
||||
// Reader
|
||||
readerBufferSize = 64 * 1024
|
||||
readerBufferSize = 64 * 1024
|
||||
readerPollIntervalMin = 10 * time.Millisecond
|
||||
readerPollIntervalStep = 5 * time.Millisecond
|
||||
readerPollIntervalMax = 50 * time.Millisecond
|
||||
|
||||
// Terminal
|
||||
initialDelay = 20 * time.Millisecond
|
||||
@@ -68,7 +71,7 @@ const (
|
||||
EvtSearchProgress
|
||||
EvtSearchFin
|
||||
EvtHeader
|
||||
EvtClose
|
||||
EvtReady
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
42
src/core.go
42
src/core.go
@@ -83,40 +83,44 @@ func Run(opts *Options, revision string) {
|
||||
|
||||
// Chunk list
|
||||
var chunkList *ChunkList
|
||||
var itemIndex int32
|
||||
header := make([]string, 0, opts.HeaderLines)
|
||||
if len(opts.WithNth) == 0 {
|
||||
chunkList = NewChunkList(func(data []byte, index int) Item {
|
||||
chunkList = NewChunkList(func(item *Item, data []byte) bool {
|
||||
if len(header) < opts.HeaderLines {
|
||||
header = append(header, string(data))
|
||||
eventBox.Set(EvtHeader, header)
|
||||
return nilItem
|
||||
return false
|
||||
}
|
||||
chars, colors := ansiProcessor(data)
|
||||
chars.Index = int32(index)
|
||||
return Item{text: chars, colors: colors}
|
||||
item.text, item.colors = ansiProcessor(data)
|
||||
item.text.Index = itemIndex
|
||||
itemIndex++
|
||||
return true
|
||||
})
|
||||
} else {
|
||||
chunkList = NewChunkList(func(data []byte, index int) Item {
|
||||
chunkList = NewChunkList(func(item *Item, data []byte) bool {
|
||||
tokens := Tokenize(string(data), opts.Delimiter)
|
||||
trans := Transform(tokens, opts.WithNth)
|
||||
transformed := joinTokens(trans)
|
||||
if len(header) < opts.HeaderLines {
|
||||
header = append(header, transformed)
|
||||
eventBox.Set(EvtHeader, header)
|
||||
return nilItem
|
||||
return false
|
||||
}
|
||||
trimmed, colors := ansiProcessor([]byte(transformed))
|
||||
trimmed.Index = int32(index)
|
||||
return Item{text: trimmed, colors: colors, origText: &data}
|
||||
item.text, item.colors = ansiProcessor([]byte(transformed))
|
||||
item.text.Index = itemIndex
|
||||
item.origText = &data
|
||||
itemIndex++
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Reader
|
||||
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
|
||||
if !streamingFilter {
|
||||
reader := Reader{func(data []byte) bool {
|
||||
reader := NewReader(func(data []byte) bool {
|
||||
return chunkList.Push(data)
|
||||
}, eventBox, opts.ReadZero}
|
||||
}, eventBox, opts.ReadZero)
|
||||
go reader.ReadSource()
|
||||
}
|
||||
|
||||
@@ -149,17 +153,17 @@ func Run(opts *Options, revision string) {
|
||||
found := false
|
||||
if streamingFilter {
|
||||
slab := util.MakeSlab(slab16Size, slab32Size)
|
||||
reader := Reader{
|
||||
reader := NewReader(
|
||||
func(runes []byte) bool {
|
||||
item := chunkList.trans(runes, 0)
|
||||
if !item.Nil() {
|
||||
item := Item{}
|
||||
if chunkList.trans(&item, runes) {
|
||||
if result, _, _ := pattern.MatchItem(&item, false, slab); result != nil {
|
||||
opts.Printer(item.text.ToString())
|
||||
found = true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, eventBox, opts.ReadZero}
|
||||
}, eventBox, opts.ReadZero)
|
||||
reader.ReadSource()
|
||||
} else {
|
||||
eventBox.Unwatch(EvtReadNew)
|
||||
@@ -205,6 +209,9 @@ func Run(opts *Options, revision string) {
|
||||
delay := true
|
||||
ticks++
|
||||
eventBox.Wait(func(events *util.Events) {
|
||||
if _, fin := (*events)[EvtReadFin]; fin {
|
||||
delete(*events, EvtReadNew)
|
||||
}
|
||||
for evt, value := range *events {
|
||||
switch evt {
|
||||
|
||||
@@ -212,6 +219,9 @@ func Run(opts *Options, revision string) {
|
||||
reading = reading && evt == EvtReadNew
|
||||
snapshot, count := chunkList.Snapshot()
|
||||
terminal.UpdateCount(count, !reading, value.(bool))
|
||||
if opts.Sync {
|
||||
terminal.UpdateList(PassMerger(&snapshot, opts.Tac))
|
||||
}
|
||||
matcher.Reset(snapshot, terminal.Input(), false, !reading, sort)
|
||||
|
||||
case EvtSearchNew:
|
||||
|
||||
@@ -17,11 +17,7 @@ func (item *Item) Index() int32 {
|
||||
return item.text.Index
|
||||
}
|
||||
|
||||
var nilItem = Item{text: util.Chars{Index: -1}}
|
||||
|
||||
func (item *Item) Nil() bool {
|
||||
return item.Index() < 0
|
||||
}
|
||||
var minItem = Item{text: util.Chars{Index: -1}}
|
||||
|
||||
func (item *Item) TrimLength() uint16 {
|
||||
return item.text.TrimLength()
|
||||
|
||||
@@ -29,7 +29,7 @@ func PassMerger(chunks *[]*Chunk, tac bool) *Merger {
|
||||
count: 0}
|
||||
|
||||
for _, chunk := range *mg.chunks {
|
||||
mg.count += len(*chunk)
|
||||
mg.count += chunk.count
|
||||
}
|
||||
return &mg
|
||||
}
|
||||
@@ -65,7 +65,7 @@ func (mg *Merger) Get(idx int) Result {
|
||||
idx = mg.count - idx - 1
|
||||
}
|
||||
chunk := (*mg.chunks)[idx/chunkSize]
|
||||
return Result{item: &(*chunk)[idx%chunkSize]}
|
||||
return Result{item: &chunk.items[idx%chunkSize]}
|
||||
}
|
||||
|
||||
if mg.sorted {
|
||||
|
||||
@@ -962,7 +962,9 @@ func parseOptions(opts *Options, allArgs []string) {
|
||||
case "--algo":
|
||||
opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
|
||||
case "--expect":
|
||||
opts.Expect = parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required")
|
||||
for k, v := range parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required") {
|
||||
opts.Expect[k] = v
|
||||
}
|
||||
case "--no-expect":
|
||||
opts.Expect = make(map[int]string)
|
||||
case "--tiebreak":
|
||||
@@ -1140,7 +1142,9 @@ func parseOptions(opts *Options, allArgs []string) {
|
||||
} else if match, value := optString(arg, "--toggle-sort="); match {
|
||||
parseToggleSort(opts.Keymap, value)
|
||||
} else if match, value := optString(arg, "--expect="); match {
|
||||
opts.Expect = parseKeyChords(value, "key names required")
|
||||
for k, v := range parseKeyChords(value, "key names required") {
|
||||
opts.Expect[k] = v
|
||||
}
|
||||
} else if match, value := optString(arg, "--tiebreak="); match {
|
||||
opts.Criteria = parseTiebreak(value)
|
||||
} else if match, value := optString(arg, "--color="); match {
|
||||
|
||||
@@ -414,3 +414,10 @@ func TestPreviewOpts(t *testing.T) {
|
||||
t.Error(opts.Preview)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdditiveExpect(t *testing.T) {
|
||||
opts := optsFor("--expect=a", "--expect", "b", "--expect=c")
|
||||
if len(opts.Expect) != 3 {
|
||||
t.Error(opts.Expect)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,12 +10,12 @@ import (
|
||||
|
||||
// fuzzy
|
||||
// 'exact
|
||||
// ^exact-prefix
|
||||
// exact-suffix$
|
||||
// !not-fuzzy
|
||||
// !'not-exact
|
||||
// !^not-exact-prefix
|
||||
// !not-exact-suffix$
|
||||
// ^prefix-exact
|
||||
// suffix-exact$
|
||||
// !inverse-exact
|
||||
// !'inverse-fuzzy
|
||||
// !^inverse-prefix-exact
|
||||
// !inverse-suffix-exact$
|
||||
|
||||
type termType int
|
||||
|
||||
@@ -32,7 +32,6 @@ type term struct {
|
||||
inv bool
|
||||
text []rune
|
||||
caseSensitive bool
|
||||
origText []rune
|
||||
}
|
||||
|
||||
type termSet []term
|
||||
@@ -48,6 +47,7 @@ type Pattern struct {
|
||||
text []rune
|
||||
termSets []termSet
|
||||
cacheable bool
|
||||
cacheKey string
|
||||
delimiter Delimiter
|
||||
nth []Range
|
||||
procFun map[termType]algo.Algo
|
||||
@@ -60,7 +60,7 @@ var (
|
||||
)
|
||||
|
||||
func init() {
|
||||
_splitRegex = regexp.MustCompile("\\s+")
|
||||
_splitRegex = regexp.MustCompile(" +")
|
||||
clearPatternCache()
|
||||
clearChunkCache()
|
||||
}
|
||||
@@ -81,7 +81,10 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
||||
|
||||
var asString string
|
||||
if extended {
|
||||
asString = strings.Trim(string(runes), " ")
|
||||
asString = strings.TrimLeft(string(runes), " ")
|
||||
for strings.HasSuffix(asString, " ") && !strings.HasSuffix(asString, "\\ ") {
|
||||
asString = asString[:len(asString)-1]
|
||||
}
|
||||
} else {
|
||||
asString = string(runes)
|
||||
}
|
||||
@@ -101,7 +104,7 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
||||
for idx, term := range termSet {
|
||||
// If the query contains inverse search terms or OR operators,
|
||||
// we cannot cache the search scope
|
||||
if !cacheable || idx > 0 || term.inv || !fuzzy && term.typ != termExact {
|
||||
if !cacheable || idx > 0 || term.inv || fuzzy && term.typ != termFuzzy || !fuzzy && term.typ != termExact {
|
||||
cacheable = false
|
||||
break Loop
|
||||
}
|
||||
@@ -130,6 +133,7 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
||||
delimiter: delimiter,
|
||||
procFun: make(map[termType]algo.Algo)}
|
||||
|
||||
ptr.cacheKey = ptr.buildCacheKey()
|
||||
ptr.procFun[termFuzzy] = fuzzyAlgo
|
||||
ptr.procFun[termEqual] = algo.EqualMatch
|
||||
ptr.procFun[termExact] = algo.ExactMatchNaive
|
||||
@@ -141,27 +145,30 @@ func BuildPattern(fuzzy bool, fuzzyAlgo algo.Algo, extended bool, caseMode Case,
|
||||
}
|
||||
|
||||
func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet {
|
||||
str = strings.Replace(str, "\\ ", "\t", -1)
|
||||
tokens := _splitRegex.Split(str, -1)
|
||||
sets := []termSet{}
|
||||
set := termSet{}
|
||||
switchSet := false
|
||||
afterBar := false
|
||||
for _, token := range tokens {
|
||||
typ, inv, text := termFuzzy, false, token
|
||||
typ, inv, text := termFuzzy, false, strings.Replace(token, "\t", " ", -1)
|
||||
lowerText := strings.ToLower(text)
|
||||
caseSensitive := caseMode == CaseRespect ||
|
||||
caseMode == CaseSmart && text != lowerText
|
||||
if !caseSensitive {
|
||||
text = lowerText
|
||||
}
|
||||
origText := []rune(text)
|
||||
if !fuzzy {
|
||||
typ = termExact
|
||||
}
|
||||
|
||||
if text == "|" {
|
||||
if len(set) > 0 && !afterBar && text == "|" {
|
||||
switchSet = false
|
||||
afterBar = true
|
||||
continue
|
||||
}
|
||||
afterBar = false
|
||||
|
||||
if strings.HasPrefix(text, "!") {
|
||||
inv = true
|
||||
@@ -169,6 +176,11 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
||||
text = text[1:]
|
||||
}
|
||||
|
||||
if text != "$" && strings.HasSuffix(text, "$") {
|
||||
typ = termSuffix
|
||||
text = text[:len(text)-1]
|
||||
}
|
||||
|
||||
if strings.HasPrefix(text, "'") {
|
||||
// Flip exactness
|
||||
if fuzzy && !inv {
|
||||
@@ -179,16 +191,12 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
||||
text = text[1:]
|
||||
}
|
||||
} else if strings.HasPrefix(text, "^") {
|
||||
if strings.HasSuffix(text, "$") {
|
||||
if typ == termSuffix {
|
||||
typ = termEqual
|
||||
text = text[1 : len(text)-1]
|
||||
} else {
|
||||
typ = termPrefix
|
||||
text = text[1:]
|
||||
}
|
||||
} else if strings.HasSuffix(text, "$") {
|
||||
typ = termSuffix
|
||||
text = text[:len(text)-1]
|
||||
text = text[1:]
|
||||
}
|
||||
|
||||
if len(text) > 0 {
|
||||
@@ -204,8 +212,7 @@ func parseTerms(fuzzy bool, caseMode Case, normalize bool, str string) []termSet
|
||||
typ: typ,
|
||||
inv: inv,
|
||||
text: textRunes,
|
||||
caseSensitive: caseSensitive,
|
||||
origText: origText})
|
||||
caseSensitive: caseSensitive})
|
||||
switchSet = true
|
||||
}
|
||||
}
|
||||
@@ -228,18 +235,22 @@ func (p *Pattern) AsString() string {
|
||||
return string(p.text)
|
||||
}
|
||||
|
||||
// CacheKey is used to build string to be used as the key of result cache
|
||||
func (p *Pattern) CacheKey() string {
|
||||
func (p *Pattern) buildCacheKey() string {
|
||||
if !p.extended {
|
||||
return p.AsString()
|
||||
}
|
||||
cacheableTerms := []string{}
|
||||
for _, termSet := range p.termSets {
|
||||
if len(termSet) == 1 && !termSet[0].inv && (p.fuzzy || termSet[0].typ == termExact) {
|
||||
cacheableTerms = append(cacheableTerms, string(termSet[0].origText))
|
||||
cacheableTerms = append(cacheableTerms, string(termSet[0].text))
|
||||
}
|
||||
}
|
||||
return strings.Join(cacheableTerms, " ")
|
||||
return strings.Join(cacheableTerms, "\t")
|
||||
}
|
||||
|
||||
// CacheKey is used to build string to be used as the key of result cache
|
||||
func (p *Pattern) CacheKey() string {
|
||||
return p.cacheKey
|
||||
}
|
||||
|
||||
// Match returns the list of matches Items in the given Chunk
|
||||
@@ -267,8 +278,8 @@ func (p *Pattern) matchChunk(chunk *Chunk, space []Result, slab *util.Slab) []Re
|
||||
matches := []Result{}
|
||||
|
||||
if space == nil {
|
||||
for idx := range *chunk {
|
||||
if match, _, _ := p.MatchItem(&(*chunk)[idx], false, slab); match != nil {
|
||||
for idx := 0; idx < chunk.count; idx++ {
|
||||
if match, _, _ := p.MatchItem(&chunk.items[idx], false, slab); match != nil {
|
||||
matches = append(matches, *match)
|
||||
}
|
||||
}
|
||||
@@ -376,7 +387,7 @@ func (p *Pattern) transformInput(item *Item) []Token {
|
||||
|
||||
func (p *Pattern) iter(pfun algo.Algo, tokens []Token, caseSensitive bool, normalize bool, forward bool, pattern []rune, withPos bool, slab *util.Slab) (Offset, int, *[]int) {
|
||||
for _, part := range tokens {
|
||||
if res, pos := pfun(caseSensitive, normalize, forward, *part.text, pattern, withPos, slab); res.Start >= 0 {
|
||||
if res, pos := pfun(caseSensitive, normalize, forward, part.text, pattern, withPos, slab); res.Start >= 0 {
|
||||
sidx := int32(res.Start) + part.prefixLength
|
||||
eidx := int32(res.End) + part.prefixLength
|
||||
if pos != nil {
|
||||
|
||||
@@ -16,7 +16,7 @@ func init() {
|
||||
|
||||
func TestParseTermsExtended(t *testing.T) {
|
||||
terms := parseTerms(true, CaseSmart, false,
|
||||
"| aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | | zzz$ | !ZZZ |")
|
||||
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$ | ^iii$ ^xxx | 'yyy | zzz$ | !ZZZ |")
|
||||
if len(terms) != 9 ||
|
||||
terms[0][0].typ != termFuzzy || terms[0][0].inv ||
|
||||
terms[1][0].typ != termExact || terms[1][0].inv ||
|
||||
@@ -33,19 +33,11 @@ func TestParseTermsExtended(t *testing.T) {
|
||||
terms[8][3].typ != termExact || !terms[8][3].inv {
|
||||
t.Errorf("%s", terms)
|
||||
}
|
||||
for idx, termSet := range terms[:8] {
|
||||
for _, termSet := range terms[:8] {
|
||||
term := termSet[0]
|
||||
if len(term.text) != 3 {
|
||||
t.Errorf("%s", term)
|
||||
}
|
||||
if idx > 0 && len(term.origText) != 4+idx/5 {
|
||||
t.Errorf("%s", term)
|
||||
}
|
||||
}
|
||||
for _, term := range terms[8] {
|
||||
if len(term.origText) != 4 {
|
||||
t.Errorf("%s", term)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +58,7 @@ func TestParseTermsExtendedExact(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestParseTermsEmpty(t *testing.T) {
|
||||
terms := parseTerms(true, CaseSmart, false, "' $ ^ !' !^ !$")
|
||||
terms := parseTerms(true, CaseSmart, false, "' ^ !' !^")
|
||||
if len(terms) != 0 {
|
||||
t.Errorf("%s", terms)
|
||||
}
|
||||
@@ -77,8 +69,9 @@ func TestExact(t *testing.T) {
|
||||
clearPatternCache()
|
||||
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, false, true, true,
|
||||
[]Range{}, Delimiter{}, []rune("'abc"))
|
||||
chars := util.ToChars([]byte("aabbcc abc"))
|
||||
res, pos := algo.ExactMatchNaive(
|
||||
pattern.caseSensitive, pattern.normalize, pattern.forward, util.ToChars([]byte("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
|
||||
pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
|
||||
if res.Start != 7 || res.End != 10 {
|
||||
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||
}
|
||||
@@ -93,8 +86,9 @@ func TestEqual(t *testing.T) {
|
||||
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, false, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))
|
||||
|
||||
match := func(str string, sidxExpected int, eidxExpected int) {
|
||||
chars := util.ToChars([]byte(str))
|
||||
res, pos := algo.EqualMatch(
|
||||
pattern.caseSensitive, pattern.normalize, pattern.forward, util.ToChars([]byte(str)), pattern.termSets[0][0].text, true, nil)
|
||||
pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
|
||||
if res.Start != sidxExpected || res.End != eidxExpected {
|
||||
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
|
||||
}
|
||||
@@ -138,12 +132,11 @@ func TestOrigTextAndTransformed(t *testing.T) {
|
||||
|
||||
origBytes := []byte("junegunn.choi")
|
||||
for _, extended := range []bool{false, true} {
|
||||
chunk := Chunk{
|
||||
Item{
|
||||
text: util.ToChars([]byte("junegunn")),
|
||||
origText: &origBytes,
|
||||
transformed: &trans},
|
||||
}
|
||||
chunk := Chunk{count: 1}
|
||||
chunk.items[0] = Item{
|
||||
text: util.ToChars([]byte("junegunn")),
|
||||
origText: &origBytes,
|
||||
transformed: &trans}
|
||||
pattern.extended = extended
|
||||
matches := pattern.matchChunk(&chunk, nil, slab) // No cache
|
||||
if !(matches[0].item.text.ToString() == "junegunn" &&
|
||||
@@ -152,7 +145,7 @@ func TestOrigTextAndTransformed(t *testing.T) {
|
||||
t.Error("Invalid match result", matches)
|
||||
}
|
||||
|
||||
match, offsets, pos := pattern.MatchItem(&chunk[0], true, slab)
|
||||
match, offsets, pos := pattern.MatchItem(&chunk.items[0], true, slab)
|
||||
if !(match.item.text.ToString() == "junegunn" &&
|
||||
string(*match.item.origText) == "junegunn.choi" &&
|
||||
offsets[0][0] == 0 && offsets[0][1] == 5 &&
|
||||
@@ -167,40 +160,47 @@ func TestOrigTextAndTransformed(t *testing.T) {
|
||||
|
||||
func TestCacheKey(t *testing.T) {
|
||||
test := func(extended bool, patStr string, expected string, cacheable bool) {
|
||||
clearPatternCache()
|
||||
pat := BuildPattern(true, algo.FuzzyMatchV2, extended, CaseSmart, false, true, true, []Range{}, Delimiter{}, []rune(patStr))
|
||||
if pat.CacheKey() != expected {
|
||||
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
||||
}
|
||||
if pat.cacheable != cacheable {
|
||||
t.Errorf("Expected: %s, actual: %s (%s)", cacheable, pat.cacheable, patStr)
|
||||
t.Errorf("Expected: %t, actual: %t (%s)", cacheable, pat.cacheable, patStr)
|
||||
}
|
||||
clearPatternCache()
|
||||
}
|
||||
test(false, "foo !bar", "foo !bar", true)
|
||||
test(false, "foo | bar !baz", "foo | bar !baz", true)
|
||||
test(true, "foo bar baz", "foo bar baz", true)
|
||||
test(true, "foo bar baz", "foo\tbar\tbaz", true)
|
||||
test(true, "foo !bar", "foo", false)
|
||||
test(true, "foo !bar baz", "foo baz", false)
|
||||
test(true, "foo !bar baz", "foo\tbaz", false)
|
||||
test(true, "foo | bar baz", "baz", false)
|
||||
test(true, "foo | bar | baz", "", false)
|
||||
test(true, "foo | bar !baz", "", false)
|
||||
test(true, "| | | foo", "foo", true)
|
||||
test(true, "| | foo", "", false)
|
||||
test(true, "| | | foo", "foo", false)
|
||||
}
|
||||
|
||||
func TestCacheable(t *testing.T) {
|
||||
test := func(fuzzy bool, str string, cacheable bool) {
|
||||
test := func(fuzzy bool, str string, expected string, cacheable bool) {
|
||||
clearPatternCache()
|
||||
pat := BuildPattern(fuzzy, algo.FuzzyMatchV2, true, CaseSmart, true, true, true, []Range{}, Delimiter{}, []rune(str))
|
||||
if pat.CacheKey() != expected {
|
||||
t.Errorf("Expected: %s, actual: %s", expected, pat.CacheKey())
|
||||
}
|
||||
if cacheable != pat.cacheable {
|
||||
t.Errorf("Invalid Pattern.cacheable for \"%s\": %v (expected: %v)", str, pat.cacheable, cacheable)
|
||||
}
|
||||
clearPatternCache()
|
||||
}
|
||||
test(true, "foo bar", true)
|
||||
test(true, "foo 'bar", true)
|
||||
test(true, "foo !bar", false)
|
||||
test(true, "foo bar", "foo\tbar", true)
|
||||
test(true, "foo 'bar", "foo\tbar", false)
|
||||
test(true, "foo !bar", "foo", false)
|
||||
|
||||
test(false, "foo bar", true)
|
||||
test(false, "foo '", true)
|
||||
test(false, "foo 'bar", false)
|
||||
test(false, "foo !bar", false)
|
||||
test(false, "foo bar", "foo\tbar", true)
|
||||
test(false, "foo 'bar", "foo", false)
|
||||
test(false, "foo '", "foo", true)
|
||||
test(false, "foo 'bar", "foo", false)
|
||||
test(false, "foo !bar", "foo", false)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
"bufio"
"io"
"os"
"sync/atomic"
"time"

"github.com/junegunn/fzf/src/util"
)
@@ -13,10 +15,43 @@ type Reader struct {
pusher func([]byte) bool
eventBox *util.EventBox
delimNil bool
event int32
}

// NewReader returns new Reader object
func NewReader(pusher func([]byte) bool, eventBox *util.EventBox, delimNil bool) *Reader {
return &Reader{pusher, eventBox, delimNil, int32(EvtReady)}
}

func (r *Reader) startEventPoller() {
go func() {
ptr := &r.event
pollInterval := readerPollIntervalMin
for {
if atomic.CompareAndSwapInt32(ptr, int32(EvtReadNew), int32(EvtReady)) {
r.eventBox.Set(EvtReadNew, true)
pollInterval = readerPollIntervalMin
} else if atomic.LoadInt32(ptr) == int32(EvtReadFin) {
return
} else {
pollInterval += readerPollIntervalStep
if pollInterval > readerPollIntervalMax {
pollInterval = readerPollIntervalMax
}
}
time.Sleep(pollInterval)
}
}()
}

func (r *Reader) fin(success bool) {
atomic.StoreInt32(&r.event, int32(EvtReadFin))
r.eventBox.Set(EvtReadFin, success)
}

// ReadSource reads data from the default command or from standard input
func (r *Reader) ReadSource() {
r.startEventPoller()
var success bool
if util.IsTty() {
cmd := os.Getenv("FZF_DEFAULT_COMMAND")
@@ -27,7 +62,7 @@ func (r *Reader) ReadSource() {
} else {
success = r.readFromStdin()
}
r.eventBox.Set(EvtReadFin, success)
r.fin(success)
}

func (r *Reader) feed(src io.Reader) {
@@ -41,7 +76,7 @@ func (r *Reader) feed(src io.Reader) {
// end in delim.
bytea, err := reader.ReadBytes(delim)
byteaLen := len(bytea)
if len(bytea) > 0 {
if byteaLen > 0 {
if err == nil {
// get rid of carriage return if under Windows:
if util.IsWindows() && byteaLen >= 2 && bytea[byteaLen-2] == byte('\r') {
@@ -51,7 +86,7 @@ func (r *Reader) feed(src io.Reader) {
}
}
if r.pusher(bytea) {
r.eventBox.Set(EvtReadNew, true)
atomic.StoreInt32(&r.event, int32(EvtReadNew))
}
}
if err != nil {

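The restructured Reader above stops writing to the EventBox once per pushed line; the hot path only stores an atomic flag, and startEventPoller republishes it at a bounded, self-adjusting interval until fin marks the read as finished. Below is a self-contained sketch of that coalescing pattern, with made-up constants and a plain callback standing in for the EventBox; it is not fzf's code, just the same shape.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const (
	evtReady int32 = iota // nothing pending
	evtNew                // producer stored new data since the last publish
	evtFin                // producer is done
)

// Assumed poll bounds for this sketch only.
const (
	pollMin  = 10 * time.Millisecond
	pollStep = 5 * time.Millisecond
	pollMax  = 50 * time.Millisecond
)

// poll coalesces many producer notifications into at most one publish per
// interval, backing off while the flag stays quiet and resetting to the
// minimum interval as soon as new data shows up.
func poll(flag *int32, publish func()) {
	interval := pollMin
	for {
		if atomic.CompareAndSwapInt32(flag, evtNew, evtReady) {
			publish()
			interval = pollMin
		} else if atomic.LoadInt32(flag) == evtFin {
			return
		} else {
			interval += pollStep
			if interval > pollMax {
				interval = pollMax
			}
		}
		time.Sleep(interval)
	}
}

func main() {
	var flag int32 = evtReady
	go func() {
		for i := 0; i < 100000; i++ {
			atomic.StoreInt32(&flag, evtNew) // cheap per-item notification
		}
		atomic.StoreInt32(&flag, evtFin)
	}()
	published := 0
	poll(&flag, func() { published++ })
	fmt.Println("publishes:", published) // far fewer than 100000
}
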
@@ -2,6 +2,7 @@ package fzf

import (
"testing"
"time"

"github.com/junegunn/fzf/src/util"
)
@@ -11,7 +12,10 @@ func TestReadFromCommand(t *testing.T) {
eb := util.NewEventBox()
reader := Reader{
pusher: func(s []byte) bool { strs = append(strs, string(s)); return true },
eventBox: eb}
eventBox: eb,
event: int32(EvtReady)}

reader.startEventPoller()

// Check EventBox
if eb.Peek(EvtReadNew) {
@@ -19,21 +23,16 @@ func TestReadFromCommand(t *testing.T) {
}

// Normal command
reader.readFromCommand(`echo abc && echo def`)
reader.fin(reader.readFromCommand(`echo abc && echo def`))
if len(strs) != 2 || strs[0] != "abc" || strs[1] != "def" {
t.Errorf("%s", strs)
}

// Check EventBox again
if !eb.Peek(EvtReadNew) {
t.Error("EvtReadNew should be set yet")
}
eb.WaitFor(EvtReadFin)

// Wait should return immediately
eb.Wait(func(events *util.Events) {
if _, found := (*events)[EvtReadNew]; !found {
t.Errorf("%s", events)
}
events.Clear()
})

@@ -42,8 +41,14 @@ func TestReadFromCommand(t *testing.T) {
t.Error("EvtReadNew should not be set yet")
}

// Make sure that event poller is finished
time.Sleep(readerPollIntervalMax)

// Restart event poller
reader.startEventPoller()

// Failing command
reader.readFromCommand(`no-such-command`)
reader.fin(reader.readFromCommand(`no-such-command`))
strs = []string{}
if len(strs) > 0 {
t.Errorf("%s", strs)
@@ -51,6 +56,9 @@ func TestReadFromCommand(t *testing.T) {

// Check EventBox again
if eb.Peek(EvtReadNew) {
t.Error("Command failed. EvtReadNew should be set")
t.Error("Command failed. EvtReadNew should not be set")
}
if !eb.Peek(EvtReadFin) {
t.Error("EvtReadFin should be set")
}
}

@@ -70,7 +70,7 @@ func buildResult(item *Item, offsets []Offset, score int) Result {
}
}
}
result.points[idx] = val
result.points[3-idx] = val
}

return result
@@ -85,7 +85,7 @@ func (result *Result) Index() int32 {
}

func minRank() Result {
return Result{item: &nilItem, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
return Result{item: &minItem, points: [4]uint16{math.MaxUint16, 0, 0, 0}}
}

func (result *Result) colorOffsets(matchOffsets []Offset, theme *tui.ColorTheme, color tui.ColorPair, attr tui.Attr, current bool) []colorOffset {
@@ -224,16 +224,3 @@ func (a ByRelevanceTac) Swap(i, j int) {
func (a ByRelevanceTac) Less(i, j int) bool {
return compareRanks(a[i], a[j], true)
}

func compareRanks(irank Result, jrank Result, tac bool) bool {
for idx := 0; idx < 4; idx++ {
left := irank.points[idx]
right := jrank.points[idx]
if left < right {
return true
} else if left > right {
return false
}
}
return (irank.item.Index() <= jrank.item.Index()) != tac
}

16
src/result_others.go
Normal file
@@ -0,0 +1,16 @@
// +build !386,!amd64

package fzf

func compareRanks(irank Result, jrank Result, tac bool) bool {
for idx := 3; idx >= 0; idx-- {
left := irank.points[idx]
right := jrank.points[idx]
if left < right {
return true
} else if left > right {
return false
}
}
return (irank.item.Index() <= jrank.item.Index()) != tac
}

@@ -59,10 +59,10 @@ func TestResultRank(t *testing.T) {
strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
item1 := buildResult(
withIndex(&Item{text: util.RunesToChars(strs[0])}, 1), []Offset{}, 2)
if item1.points[0] != math.MaxUint16-2 || // Bonus
item1.points[1] != 3 || // Length
item1.points[2] != 0 || // Unused
item1.points[3] != 0 || // Unused
if item1.points[3] != math.MaxUint16-2 || // Bonus
item1.points[2] != 3 || // Length
item1.points[1] != 0 || // Unused
item1.points[0] != 0 || // Unused
item1.item.Index() != 1 {
t.Error(item1)
}

16
src/result_x86.go
Normal file
@@ -0,0 +1,16 @@
// +build 386 amd64

package fzf

import "unsafe"

func compareRanks(irank Result, jrank Result, tac bool) bool {
left := *(*uint64)(unsafe.Pointer(&irank.points[0]))
right := *(*uint64)(unsafe.Pointer(&jrank.points[0]))
if left < right {
return true
} else if left > right {
return false
}
return (irank.item.Index() <= jrank.item.Index()) != tac
}

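The two new files are alternative builds of the same comparison: the generic one walks the four rank fields from index 3 (the most significant, used as Bonus per the test above) down to index 0, while the 386/amd64 build reads the [4]uint16 array as one little-endian uint64 so a single integer compare yields the same ordering; that layout is also why buildResult now stores into points[3-idx]. Here is a small standalone check of the equivalence, using plain arrays rather than fzf's Result type.

package main

import (
	"fmt"
	"unsafe"
)

// lexLess compares the fields from most significant (index 3) down to least
// significant (index 0), like the portable compareRanks loop.
func lexLess(a, b [4]uint16) bool {
	for idx := 3; idx >= 0; idx-- {
		if a[idx] != b[idx] {
			return a[idx] < b[idx]
		}
	}
	return false
}

// packedLess reinterprets the array as a single uint64. On a little-endian
// CPU (386/amd64), element 3 occupies the high-order bytes, so the numeric
// comparison orders values exactly like lexLess.
func packedLess(a, b [4]uint16) bool {
	return *(*uint64)(unsafe.Pointer(&a[0])) < *(*uint64)(unsafe.Pointer(&b[0]))
}

func main() {
	x := [4]uint16{7, 0, 3, 100}
	y := [4]uint16{9, 0, 3, 100}
	fmt.Println(lexLess(x, y), packedLess(x, y)) // true true on amd64
}
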
@@ -281,9 +281,13 @@ func defaultKeymap() map[int][]action {
return keymap
}

func trimQuery(query string) []rune {
return []rune(strings.Replace(query, "\t", " ", -1))
}

// NewTerminal returns new Terminal object
func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
input := []rune(opts.Query)
input := trimQuery(opts.Query)
var header []string
if opts.Reverse {
header = opts.Header
@@ -1694,13 +1698,13 @@ func (t *Terminal) Loop() {
case actPreviousHistory:
if t.history != nil {
t.history.override(string(t.input))
t.input = []rune(t.history.previous())
t.input = trimQuery(t.history.previous())
t.cx = len(t.input)
}
case actNextHistory:
if t.history != nil {
t.history.override(string(t.input))
t.input = []rune(t.history.next())
t.input = trimQuery(t.history.next())
t.cx = len(t.input)
}
case actSigStop:

@@ -147,7 +147,7 @@ func Tokenize(text string, delimiter Delimiter) []Token {
if delimiter.regex != nil {
for len(text) > 0 {
loc := delimiter.regex.FindStringIndex(text)
if loc == nil {
if len(loc) < 2 {
loc = []int{0, len(text)}
}
last := util.Max(loc[1], 1)

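For context on the changed guard: regexp's FindStringIndex returns either nil (no match) or a two-element [start, end) slice, and the length of a nil slice is 0 in Go, so len(loc) < 2 catches the same no-match case before falling back to treating the rest of the string as the final token. A tiny demonstration of those return values, standard library only:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`,`)

	// Delimiter present: a two-element [start, end) slice.
	fmt.Println(re.FindStringIndex("foo,bar")) // [3 4]

	// No delimiter: nil, whose length is 0, so len(loc) < 2 holds.
	loc := re.FindStringIndex("foobar")
	fmt.Println(loc == nil, len(loc)) // true 0
}
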
@@ -209,7 +209,9 @@ func (r *LightRenderer) Init() {
r.csi(fmt.Sprintf("%dA", r.MaxY()-1))
r.csi("G")
r.csi("K")
r.csi("s")
if !r.clearOnExit && !r.fullscreen {
r.csi("s")
}
if !r.fullscreen && r.mouse {
r.yoffset, _ = r.findOffset()
}

@@ -160,7 +160,7 @@ func (chars *Chars) CopyRunes(dest []rune) {
copy(dest, runes)
return
}
for idx, b := range chars.slice {
for idx, b := range chars.slice[:len(dest)] {
dest[idx] = rune(b)
}
return

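The CopyRunes change caps the byte loop at len(dest), matching the bounded behavior that copy already provides in the rune-backed branch; previously a destination shorter than chars.slice would index out of range. A minimal sketch of the fixed loop on plain slices (names invented for the demo, with an extra guard so it also tolerates a short source):

package main

import "fmt"

// copyBytesAsRunes converts at most len(dest) bytes, so a longer source can
// no longer push the loop past the end of dest.
func copyBytesAsRunes(dest []rune, src []byte) {
	n := len(dest)
	if n > len(src) { // extra guard for this standalone demo
		n = len(src)
	}
	for idx, b := range src[:n] {
		dest[idx] = rune(b)
	}
}

func main() {
	dest := make([]rune, 3)
	copyBytesAsRunes(dest, []byte("abcdef"))
	fmt.Println(string(dest)) // abc
}
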
@@ -6,11 +6,11 @@ Execute (Setup):

Execute (fzf#run with dir option):
let cwd = getcwd()
let result = fzf#run({ 'options': '--filter=vdr', 'dir': g:dir })
let result = fzf#run({ 'source': 'git ls-files', 'options': '--filter=vdr', 'dir': g:dir })
AssertEqual ['fzf.vader'], result
AssertEqual getcwd(), cwd

let result = sort(fzf#run({ 'options': '--filter e', 'dir': g:dir }))
let result = sort(fzf#run({ 'source': 'git ls-files', 'options': '--filter e', 'dir': g:dir }))
AssertEqual ['fzf.vader', 'test_go.rb'], result
AssertEqual getcwd(), cwd

@@ -19,7 +19,7 @@ Execute (fzf#run with Funcref command):
function! g:FzfTest(e)
call add(g:ret, a:e)
endfunction
let result = sort(fzf#run({ 'sink': function('g:FzfTest'), 'options': '--filter e', 'dir': g:dir }))
let result = sort(fzf#run({ 'source': 'git ls-files', 'sink': function('g:FzfTest'), 'options': '--filter e', 'dir': g:dir }))
AssertEqual ['fzf.vader', 'test_go.rb'], result
AssertEqual ['fzf.vader', 'test_go.rb'], sort(g:ret)

@@ -140,7 +140,7 @@ Execute (fzf#wrap):
let g:fzf_history_dir = '/tmp'
let opts = fzf#wrap('foobar', {'options': '--color light'})
Log opts
Assert opts.options =~ '--history /tmp/foobar'
Assert opts.options =~ "--history '/tmp/foobar'"
Assert opts.options =~ '--color light'

let g:fzf_colors = { 'fg': ['fg', 'Error'] }
@@ -149,16 +149,18 @@ Execute (fzf#wrap):

Execute (fzf#shellescape with sh):
AssertEqual '''''', fzf#shellescape('', 'sh')
AssertEqual '''\''', fzf#shellescape('\', 'sh')
AssertEqual '''""''', fzf#shellescape('""', 'sh')
AssertEqual '''foobar>''', fzf#shellescape('foobar>', 'sh')
AssertEqual '''\"''', fzf#shellescape('\"', 'sh')
AssertEqual '''\\\"\\\''', fzf#shellescape('\\\"\\\', 'sh')
AssertEqual '''echo ''\''''a''\'''' && echo ''\''''b''\''''''', fzf#shellescape('echo ''a'' && echo ''b''', 'sh')

Execute (fzf#shellescape with cmd.exe):
AssertEqual '^"^"', fzf#shellescape('', 'cmd.exe')
AssertEqual '^"\\^"', fzf#shellescape('\', 'cmd.exe')
AssertEqual '^"\^"\^"^"', fzf#shellescape('""', 'cmd.exe')
AssertEqual '^"foobar^>^"', fzf#shellescape('foobar>', 'cmd.exe')
AssertEqual '^"\\\^"\\^"', fzf#shellescape('\\\\\\\\"\', 'cmd.exe')
AssertEqual '^"\\\\\\\^"\\\\\\^"', fzf#shellescape('\\\"\\\', 'cmd.exe')
AssertEqual '^"echo ''a'' ^&^& echo ''b''^"', fzf#shellescape('echo ''a'' && echo ''b''', 'cmd.exe')

AssertEqual '^"C:\Program Files ^(x86^)\\^"', fzf#shellescape('C:\Program Files (x86)\', 'cmd.exe')

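The added assertions document two escaping dialects: for sh, fzf#shellescape wraps the argument in single quotes and rewrites every embedded single quote as '\'' (close, escape, reopen), while the cmd.exe variant caret-escapes metacharacters such as > and & and backslash-escapes quotes, doubling backslashes that precede them. A hedged Go sketch of just the sh rule, which reproduces the sh expectations above (this is not the plugin's Vimscript implementation):

package main

import (
	"fmt"
	"strings"
)

// shEscape wraps s in single quotes; the only character that needs special
// treatment inside single quotes is the single quote itself, rewritten as
// '\'' (end quoting, emit a literal ', start quoting again).
func shEscape(s string) string {
	return "'" + strings.Replace(s, "'", `'\''`, -1) + "'"
}

func main() {
	fmt.Println(shEscape(""))                     // ''
	fmt.Println(shEscape("foobar>"))              // 'foobar>'
	fmt.Println(shEscape(`echo 'a' && echo 'b'`)) // 'echo '\''a'\'' && echo '\''b'\'''
}
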
527
test/test_go.rb
File diff suppressed because it is too large