Move modules/indexer to modules/indexer/code (#9301)
commit 50da9f7dae (parent 2c83dac5d4)
4 changed files with 20 additions and 21 deletions
@@ -15,7 +15,6 @@ import (
 	"code.gitea.io/gitea/modules/charset"
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/graceful"
-	"code.gitea.io/gitea/modules/indexer"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
 	"github.com/ethantkoenig/rupture"
@@ -39,7 +38,7 @@ func InitRepoIndexer() {
 	go func() {
 		start := time.Now()
 		log.Info("Initializing Repository Indexer")
-		indexer.InitRepoIndexer(populateRepoIndexerAsynchronously)
+		initRepoIndexer(populateRepoIndexerAsynchronously)
 		go processRepoIndexerOperationQueue()
 		waitChannel <- time.Since(start)
 	}()
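The hunk above keeps the existing startup shape: the exported InitRepoIndexer launches the (now package-local) initRepoIndexer in a goroutine and reports the elapsed time on a wait channel. Below is a minimal, self-contained sketch of that pattern; the populate function, channel name, and printed messages are stand-ins, not the project's actual code.

package main

import (
	"fmt"
	"time"
)

// populateIndex stands in for populateRepoIndexerAsynchronously: in the real
// code it walks repositories and feeds them to the indexer in the background.
func populateIndex() error {
	time.Sleep(100 * time.Millisecond) // simulate work
	return nil
}

func main() {
	waitChannel := make(chan time.Duration)

	// Same shape as InitRepoIndexer in the diff: do the slow initialization
	// in a goroutine and report how long it took on the channel.
	go func() {
		start := time.Now()
		if err := populateIndex(); err != nil {
			fmt.Println("populate:", err)
		}
		waitChannel <- time.Since(start)
	}()

	// A caller that cares about startup time can block here.
	fmt.Println("indexer ready after", <-waitChannel)
}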
@@ -130,7 +129,7 @@ func updateRepoIndexer(repoID int64) error {
 		return nil
 	}

-	batch := indexer.RepoIndexerBatch()
+	batch := RepoIndexerBatch()
 	for _, update := range changes.Updates {
 		if err := addUpdate(update, repo, batch); err != nil {
 			return err
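The loop above applies every detected file change to one shared flushing batch instead of writing documents to the index one by one. A generic sketch of that accumulate-then-flush shape follows, with a hypothetical batch type standing in for rupture.FlushingBatch and plain strings standing in for the file updates.

package main

import "fmt"

// flushingBatch is a hypothetical stand-in for rupture.FlushingBatch:
// it collects operations and flushes them in one step at the end.
type flushingBatch struct {
	ops []string
}

func (b *flushingBatch) Add(op string) error {
	b.ops = append(b.ops, op)
	return nil
}

func (b *flushingBatch) Flush() error {
	fmt.Printf("flushing %d operations\n", len(b.ops))
	b.ops = b.ops[:0]
	return nil
}

func main() {
	updates := []string{"README.md", "main.go", "doc/index.md"}

	batch := &flushingBatch{}
	for _, filename := range updates {
		// Mirrors addUpdate(update, repo, batch) in the diff: each change is
		// queued on the shared batch rather than hitting the index directly.
		if err := batch.Add("index " + filename); err != nil {
			fmt.Println("add:", err)
			return
		}
	}
	if err := batch.Flush(); err != nil {
		fmt.Println("flush:", err)
	}
}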
@@ -198,10 +197,10 @@ func addUpdate(update fileUpdate, repo *models.Repository, batch rupture.Flushin
 		// FIXME: UTF-16 files will probably fail here
 		return nil
 	}
-	indexerUpdate := indexer.RepoIndexerUpdate{
+	indexerUpdate := RepoIndexerUpdate{
 		Filepath: update.Filename,
-		Op:       indexer.RepoIndexerOpUpdate,
-		Data: &indexer.RepoIndexerData{
+		Op:       RepoIndexerOpUpdate,
+		Data: &RepoIndexerData{
 			RepoID:  repo.ID,
 			Content: string(charset.ToUTF8DropErrors(fileContents)),
 		},
@@ -210,10 +209,10 @@ func addUpdate(update fileUpdate, repo *models.Repository, batch rupture.Flushin
 }

 func addDelete(filename string, repo *models.Repository, batch rupture.FlushingBatch) error {
-	indexerUpdate := indexer.RepoIndexerUpdate{
+	indexerUpdate := RepoIndexerUpdate{
 		Filepath: filename,
-		Op:       indexer.RepoIndexerOpDelete,
-		Data: &indexer.RepoIndexerData{
+		Op:       RepoIndexerOpDelete,
+		Data: &RepoIndexerData{
 			RepoID: repo.ID,
 		},
 	}
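The two hunks above show the only real difference between an index update and a delete: the Op constant and whether Content is populated. A rough, self-contained sketch of that shape is below; the local types are stand-ins for RepoIndexerUpdate, RepoIndexerData, and the op constants, whose real definitions live in the moved package.

package main

import "fmt"

// Local stand-ins for the types used in the diff; field names follow the diff.
type repoIndexerOp int

const (
	opUpdate repoIndexerOp = iota
	opDelete
)

type repoIndexerData struct {
	RepoID  int64
	Content string
}

type repoIndexerUpdate struct {
	Filepath string
	Op       repoIndexerOp
	Data     *repoIndexerData
}

func main() {
	// Update: carries the (UTF-8 normalized) file contents.
	update := repoIndexerUpdate{
		Filepath: "main.go",
		Op:       opUpdate,
		Data:     &repoIndexerData{RepoID: 42, Content: "package main"},
	}

	// Delete: only the repo ID and path are needed.
	del := repoIndexerUpdate{
		Filepath: "old.go",
		Op:       opDelete,
		Data:     &repoIndexerData{RepoID: 42},
	}

	fmt.Printf("%+v\n%+v\n", update, del)
}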
@@ -279,7 +278,7 @@ func nonGenesisChanges(repo *models.Repository, revision string) (*repoChanges,
 		// previous commit sha may have been removed by a force push, so
 		// try rebuilding from scratch
 		log.Warn("git diff: %v", err)
-		if err = indexer.DeleteRepoFromIndexer(repo.ID); err != nil {
+		if err = deleteRepoFromIndexer(repo.ID); err != nil {
 			return nil, err
 		}
 		return genesisChanges(repo, revision)
@@ -326,8 +325,8 @@ func processRepoIndexerOperationQueue() {
 		op := <-repoIndexerOperationQueue
 		var err error
 		if op.deleted {
-			if err = indexer.DeleteRepoFromIndexer(op.repoID); err != nil {
-				log.Error("DeleteRepoFromIndexer: %v", err)
+			if err = deleteRepoFromIndexer(op.repoID); err != nil {
+				log.Error("deleteRepoFromIndexer: %v", err)
 			}
 		} else {
 			if err = updateRepoIndexer(op.repoID); err != nil {
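processRepoIndexerOperationQueue keeps the same loop; only the callee names change. It pulls operations off a channel and routes deletes and updates to the package-local functions. A condensed, runnable sketch of that consumer loop follows; the queue size, struct, and handler bodies are placeholders.

package main

import (
	"fmt"
	"time"
)

// repoIndexerOperation mirrors the two fields the diff relies on: which
// repository to touch and whether it was deleted.
type repoIndexerOperation struct {
	repoID  int64
	deleted bool
}

var repoIndexerOperationQueue = make(chan repoIndexerOperation, 1024)

// Placeholder handlers standing in for the package-local functions the
// commit switches to (deleteRepoFromIndexer / updateRepoIndexer).
func deleteRepoFromIndexer(repoID int64) error {
	fmt.Println("delete repo", repoID)
	return nil
}

func updateRepoIndexer(repoID int64) error {
	fmt.Println("update repo", repoID)
	return nil
}

// processRepoIndexerOperationQueue drains the channel forever, routing each
// operation to the delete or update path, as in the hunk above.
func processRepoIndexerOperationQueue() {
	for {
		op := <-repoIndexerOperationQueue
		if op.deleted {
			if err := deleteRepoFromIndexer(op.repoID); err != nil {
				fmt.Println("deleteRepoFromIndexer:", err)
			}
		} else {
			if err := updateRepoIndexer(op.repoID); err != nil {
				fmt.Println("updateRepoIndexer:", err)
			}
		}
	}
}

func main() {
	go processRepoIndexerOperationQueue()
	repoIndexerOperationQueue <- repoIndexerOperation{repoID: 1}
	repoIndexerOperationQueue <- repoIndexerOperation{repoID: 2, deleted: true}
	time.Sleep(100 * time.Millisecond) // let the consumer drain; real code blocks on shutdown instead
}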

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.

-package indexer
+package code

 import (
 	"os"

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.

-package indexer
+package code

 import (
 	"strings"
@@ -100,8 +100,8 @@ func (update RepoIndexerUpdate) AddToFlushingBatch(batch rupture.FlushingBatch)
 	return nil
 }

-// InitRepoIndexer initialize repo indexer
-func InitRepoIndexer(populateIndexer func() error) {
+// initRepoIndexer initialize repo indexer
+func initRepoIndexer(populateIndexer func() error) {
 	indexer, err := openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion)
 	if err != nil {
 		log.Fatal("InitRepoIndexer: %v", err)
@@ -173,8 +173,8 @@ func RepoIndexerBatch() rupture.FlushingBatch {
 	return rupture.NewFlushingBatch(indexerHolder.get(), maxBatchSize)
 }

-// DeleteRepoFromIndexer delete all of a repo's files from indexer
-func DeleteRepoFromIndexer(repoID int64) error {
+// deleteRepoFromIndexer delete all of a repo's files from indexer
+func deleteRepoFromIndexer(repoID int64) error {
 	query := numericEqualityQuery(repoID, "RepoID")
 	searchRequest := bleve.NewSearchRequestOptions(query, 2147483647, 0, false)
 	result, err := indexerHolder.get().Search(searchRequest)

@@ -11,7 +11,7 @@ import (
 	"strings"

 	"code.gitea.io/gitea/modules/highlight"
-	"code.gitea.io/gitea/modules/indexer"
+	code_indexer "code.gitea.io/gitea/modules/indexer/code"
 	"code.gitea.io/gitea/modules/util"
 )

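Because the moved package is named code (imported from modules/indexer/code), callers outside it import it under the code_indexer alias so call sites keep a descriptive qualifier: every former indexer.X reference becomes code_indexer.X. The tiny runnable sketch below only illustrates that Go aliasing mechanism with a standard-library package; it is not project code.

package main

import (
	"fmt"

	// An aliased import, the same mechanism the diff uses for
	// code_indexer "code.gitea.io/gitea/modules/indexer/code":
	// the alias becomes the package qualifier at every call site.
	str_util "strings"
)

func main() {
	// Call sites use the alias, exactly as indexer.X became code_indexer.X.
	fmt.Println(str_util.ToUpper("aliased import"))
}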
@@ -60,7 +60,7 @@ func writeStrings(buf *bytes.Buffer, strs ...string) error {
 	return nil
 }

-func searchResult(result *indexer.RepoSearchResult, startIndex, endIndex int) (*Result, error) {
+func searchResult(result *code_indexer.RepoSearchResult, startIndex, endIndex int) (*Result, error) {
 	startLineNum := 1 + strings.Count(result.Content[:startIndex], "\n")

 	var formattedLinesBuffer bytes.Buffer
@@ -113,7 +113,7 @@ func PerformSearch(repoIDs []int64, keyword string, page, pageSize int) (int, []
 		return 0, nil, nil
 	}

-	total, results, err := indexer.SearchRepoByKeyword(repoIDs, keyword, page, pageSize)
+	total, results, err := code_indexer.SearchRepoByKeyword(repoIDs, keyword, page, pageSize)
 	if err != nil {
 		return 0, nil, err
 	}
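For callers, the only visible effect of the move is the new import path and qualifier; the argument list and return values of SearchRepoByKeyword stay exactly as shown in the hunk above. A sketch of such a caller follows, assuming it compiles inside the Gitea module so the import paths resolve; the package name, function name, and log line are hypothetical, while the alias, arguments, and return values follow the diff.

package search

import (
	code_indexer "code.gitea.io/gitea/modules/indexer/code"

	"code.gitea.io/gitea/modules/log"
)

// logKeywordMatches shows the aliased call: the same function that used to be
// reached as indexer.SearchRepoByKeyword, with page 1 and 50 results per page.
func logKeywordMatches(repoIDs []int64, keyword string) error {
	total, results, err := code_indexer.SearchRepoByKeyword(repoIDs, keyword, 1, 50)
	if err != nil {
		return err
	}
	log.Info("keyword %q: %d of %v results on the first page", keyword, len(results), total)
	return nil
}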