This repository has been archived by the owner on May 31, 2024. It is now read-only.

Commit

reformat
Signed-off-by: zychen5186 <[email protected]>
zychen5186 committed Apr 30, 2024
1 parent b6b2c04 commit 6a19d5c
Showing 1 changed file with 29 additions and 33 deletions.
62 changes: 29 additions & 33 deletions pkg/bubbletea/bubbletea_pagination.go
@@ -53,47 +53,26 @@ func (m pageModel) Init() tea.Cmd {

func (m pageModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd

switch msg := msg.(type) {
case tea.KeyMsg:
switch {
case key.Matches(msg, m.paginator.KeyMap.PrevPage):
// If previous page will be out of the range of the first batch, don't update
if m.paginator.Page == firstBatchIndex*pagePerBatch {
return m, cmd
}
}
}

m.paginator, _ = m.paginator.Update(msg)

switch msg := msg.(type) {

case tea.KeyMsg:
switch msg.String() {
case "q", "esc", "ctrl+c":
return m, tea.Quit
}
switch {
case key.Matches(msg, m.paginator.KeyMap.NextPage):
if (m.paginator.Page >= (lastBatchIndex+1)*pagePerBatch-prefetchThreshold) && !fetchingForward {
// If no more data, don't fetch again (won't show spinner)
value, ok := batchLen[lastBatchIndex+1]
if !ok || value != 0 {
fetchingForward = true
cmd = fetchDataCmd(lastBatchIndex+1, forward)
}
}
case key.Matches(msg, m.paginator.KeyMap.PrevPage):
if (m.paginator.Page <= firstBatchIndex*pagePerBatch+prefetchThreshold) && (firstBatchIndex > 0) && !fetchingBackward {
fetchingBackward = true
cmd = fetchDataCmd(firstBatchIndex-1, backward)
// If previous page will be out of the range of the first batch, don't update
if m.paginator.Page == firstBatchIndex*pagePerBatch {
return m, nil
}
}
case spinner.TickMsg:
m.spinner, cmd = m.spinner.Update(msg)
return m, cmd
case newDataMsg:
if msg.fetchDirection == forward {
// Update if current page is in the range of the last batch
// e.g. user left last batch while still fetching, then don't update
// i.e. if user not in last batch when finished fetching, don't update
if m.paginator.Page/pagePerBatch >= lastBatchIndex {
*m.items = append(*m.items, msg.newItems...)
lastBatchIndex++
@@ -105,7 +84,7 @@ func (m pageModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
fetchingForward = false
} else {
// Update if current page is in the range of the first batch
// e.g. user left first batch while still fetching, then don't update
// i.e. if user not in first batch when finished fetching, don't update
if m.paginator.Page/pagePerBatch <= firstBatchIndex {
*m.items = append(msg.newItems, *m.items...)
firstBatchIndex--
@@ -118,9 +97,27 @@ func (m pageModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
m.paginator.SetTotalPages(getLocalLastPage())
return m, nil
case spinner.TickMsg:
m.spinner, cmd = m.spinner.Update(msg)
return m, cmd
}

m.paginator, _ = m.paginator.Update(msg)
switch msg := msg.(type) {
case tea.KeyMsg:
switch {
case key.Matches(msg, m.paginator.KeyMap.NextPage):
if (m.paginator.Page >= (lastBatchIndex+1)*pagePerBatch-prefetchThreshold) && !fetchingForward {
// If no more data, don't fetch again (won't show spinner)
value, ok := batchLen[lastBatchIndex+1]
if !ok || value != 0 {
fetchingForward = true
cmd = fetchDataCmd(lastBatchIndex+1, forward)
}
}
case key.Matches(msg, m.paginator.KeyMap.PrevPage):
if (m.paginator.Page <= firstBatchIndex*pagePerBatch+prefetchThreshold) && (firstBatchIndex > 0) && !fetchingBackward {
fetchingBackward = true
cmd = fetchDataCmd(firstBatchIndex-1, backward)
}
}
}

return m, cmd
@@ -153,7 +150,6 @@ func Paginator(_listHeader []printer.Column, _callback DataCallback, _filter fil
var msg []proto.Message
for i := firstBatchIndex; i < lastBatchIndex+1; i++ {
newMessages := getMessageList(i)
fmt.Println("newMessages", len(newMessages))
if int(filter.Page)-(firstBatchIndex*pagePerBatch) > int(math.Ceil(float64(len(newMessages))/msgPerPage)) {
return fmt.Errorf("the specified page has no data, please enter a valid page number")
}

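For context on how the prefetch commands in this diff complete: in Bubble Tea, a tea.Cmd is just a func() tea.Msg that the runtime runs asynchronously and whose return value is fed back into Update, so the fetchDataCmd calls issued on NextPage/PrevPage eventually arrive as the newDataMsg case handled above. A minimal sketch of such a command follows; the real fetchDataCmd is not part of this diff, so the batch-loading helper and the fetchDirection type name are assumptions inferred from how the result is consumed in Update.

// Sketch only, not the repository's actual fetchDataCmd: fetch one batch
// in the background and report it back to Update as a newDataMsg.
func fetchDataCmd(batchIndex int, dir fetchDirection) tea.Cmd { // fetchDirection: assumed name for the type of the forward/backward constants
	return func() tea.Msg {
		items := getMessageList(batchIndex) // assumed batch loader; getMessageList is used in Paginator in this same file
		return newDataMsg{
			newItems:       items, // Update appends these to the front or back of *m.items
			fetchDirection: dir,   // tells Update which end of the list to grow
		}
	}
}

Because the command runs off the main event loop, the fetchingForward and fetchingBackward flags set before dispatching it are what prevent duplicate fetches while a batch is still in flight; the newDataMsg handler resets them once the batch has been merged.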