diff --git a/Dockerfile b/Dockerfile
index dfe181a75..f25bef109 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,17 @@
-FROM azul/zulu-openjdk-debian:14
+FROM debian:12
 
 ENV DEBIAN_FRONTEND noninteractive
 
+RUN apt-get update && \
+    apt-get install -y wget gnupg2 software-properties-common
+
+RUN wget -O- https://apt.corretto.aws/corretto.key | apt-key add -
+RUN add-apt-repository -y 'deb https://apt.corretto.aws stable main'
+# For some reason, needs to be run again for the repo to be usable
+RUN add-apt-repository -y 'deb https://apt.corretto.aws stable main'
+RUN apt-get update && \
+    apt-get install -y java-17-amazon-corretto-jdk
+
 RUN mkdir /usr/app/
 COPY . /user/app/
 WORKDIR /user/app/
diff --git a/README.md b/README.md
index 288a4c675..81ff1f5f5 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ In the home directory.
 ./gradlew clean installDist test
 ```
 
-Note: This code has been tested on *Java14*
+Note: This code has been tested on *Java17*
 
 # Run gRPC Server
diff --git a/build.gradle b/build.gradle
index 0205cb020..fdb5d0308 100644
--- a/build.gradle
+++ b/build.gradle
@@ -5,11 +5,11 @@ plugins {
     id 'idea'
     id 'maven-publish'
     id 'signing'
-    id "com.diffplug.gradle.spotless" version "4.3.0"
+    id "com.diffplug.spotless" version "6.22.0"
     id 'java-library'
     id 'jacoco'
     // Build docs locally by running "site" command
-    id 'kr.motd.sphinx' version '2.10.0'
+    id 'kr.motd.sphinx' version '2.10.1'
 }
 
 repositories {
@@ -20,8 +20,10 @@ repositories {
     mavenCentral()
 }
 
-sourceCompatibility = 1.14
-targetCompatibility = 1.14
+java {
+    sourceCompatibility = JavaVersion.VERSION_17
+    targetCompatibility = JavaVersion.VERSION_17
+}
 
 allprojects {
     version = '1.0.0-SNAPSHOT'
@@ -47,7 +49,7 @@ def spatial4jVersion = '0.7'
 def s3mockVersion = '0.2.5'
 def commonsCompressVersion = '1.21'
 def awsJavaSdkVersion = '1.11.695'
-def guicedeeVersion = '1.1.1.3-jre14'
+def guicedeeVersion = '1.2.2.1-jre17'
 def prometheusClientVersion = '0.8.0'
 def fastutilVersion = '8.5.6'
@@ -117,32 +119,32 @@ dependencies {
 startScripts.enabled = false
 
 task luceneServer(type: CreateStartScripts) {
-    mainClassName = 'com.yelp.nrtsearch.server.grpc.LuceneServer'
+    mainClass = 'com.yelp.nrtsearch.server.grpc.LuceneServer'
     applicationName = 'lucene-server'
-    outputDir = new File(project.buildDir, 'tmp')
+    outputDir = new File(project.buildDir, 'tmp-app')
     classpath = startScripts.classpath
     // Add additional dependencies, e.g. custom loggers
    classpath += files('$APP_HOME/additional_libs')
 }
 
 task luceneServerClient(type: CreateStartScripts) {
-    mainClassName = 'com.yelp.nrtsearch.server.cli.LuceneClientCommand'
+    mainClass = 'com.yelp.nrtsearch.server.cli.LuceneClientCommand'
     applicationName = 'lucene-client'
-    outputDir = new File(project.buildDir, 'tmp')
+    outputDir = new File(project.buildDir, 'tmp-app')
     classpath = startScripts.classpath
 }
 
 task backupRestoreTool(type: CreateStartScripts) {
-    mainClassName = 'com.yelp.nrtsearch.server.cli.BackupRestoreCommand'
+    mainClass = 'com.yelp.nrtsearch.server.cli.BackupRestoreCommand'
     applicationName = 'backup-restore'
-    outputDir = new File(project.buildDir, 'tmp')
+    outputDir = new File(project.buildDir, 'tmp-app')
     classpath = startScripts.classpath
 }
 
 task nrtUtils(type: CreateStartScripts) {
-    mainClassName = 'com.yelp.nrtsearch.tools.nrt_utils.NrtUtilsCommand'
+    mainClass = 'com.yelp.nrtsearch.tools.nrt_utils.NrtUtilsCommand'
     applicationName = 'nrt_utils'
-    outputDir = new File(project.buildDir, 'tmp')
+    outputDir = new File(project.buildDir, 'tmp-app')
     classpath = startScripts.classpath
 }
 
@@ -165,6 +167,8 @@ task buildGrpcGateway(dependsOn: installDist, type: Exec) {
 //e.g. default is to exclude perfTests: ./gradlew test
 test {
     finalizedBy 'spotlessJavaCheck'
+    // Used by LuceneServerConfigurationTest
+    environment(Map.of('CUSTOM_HOST', 'my_custom_host', 'VAR1', 'v1', 'VAR2', 'v2', 'VAR3', 'v3'))
     if (project.hasProperty('longRunningTestsOnly')) {
         include '**/IncrementalDataCleanupCommandTest.class'
     } else {
@@ -189,7 +193,7 @@ test {
 
 jacocoTestReport {
     reports {
-        csv.enabled true
+        csv.required = true
     }
 }
 
@@ -198,17 +202,17 @@ task javadocs(type: Javadoc) {
 }
 
 task javadocsJar(type: Jar) {
-    classifier('javadoc')
+    archiveClassifier = 'javadoc'
     from javadocs.destinationDir
 }
 
 task sourcesJar(type: Jar) {
-    classifier('sources')
+    archiveClassifier = 'sources'
     from sourceSets.main.java.srcDirs
 }
 
 task testsJar(type: Jar) {
-    classifier('tests')
+    archiveClassifier = 'tests'
     from sourceSets.test.output
 }
diff --git a/clientlib/build.gradle b/clientlib/build.gradle
index 8a4ef2383..d1491e415 100644
--- a/clientlib/build.gradle
+++ b/clientlib/build.gradle
@@ -2,13 +2,13 @@ plugins {
     // Provide convenience executables for trying out the examples.
     id 'application'
     // ASSUMES GRADLE 2.12 OR HIGHER. Use plugin version 0.7.5 with earlier gradle versions
-    id 'com.google.protobuf' version '0.8.12'
+    id 'com.google.protobuf' version '0.9.4'
     // Generate IntelliJ IDEA's .idea & .iml project files
     id 'idea'
     // Publish clientlib to maven central
     id 'maven-publish'
     id 'signing'
-    id "com.diffplug.gradle.spotless"
+    id "com.diffplug.spotless"
     id 'java-library'
 }
 
@@ -20,8 +20,10 @@ repositories {
     mavenCentral()
 }
 
-sourceCompatibility = 1.8
-targetCompatibility = 1.8
+java {
+    sourceCompatibility = JavaVersion.VERSION_1_8
+    targetCompatibility = JavaVersion.VERSION_1_8
+}
 
 startScripts.enabled = false
 
 // groupId, artifactId and version for the generated pom
@@ -88,12 +90,12 @@ task javadocs(type: Javadoc) {
 }
 
 task javadocsJar(type: Jar) {
-    classifier('javadoc')
+    archiveClassifier = 'javadoc'
     from javadocs.destinationDir
 }
 
 task sourcesJar(type: Jar) {
-    classifier('sources')
+    archiveClassifier = 'sources'
     from sourceSets.main.java.srcDirs
 }
 
@@ -183,6 +185,7 @@ publishing {
 
 spotless {
     java {
+        targetExclude "build/**/*.java"
         licenseHeaderFile '../license_header'
         removeUnusedImports()
         endWithNewline()
diff --git a/docs/introduction.rst b/docs/introduction.rst
index c9c412fcc..864c41efb 100644
--- a/docs/introduction.rst
+++ b/docs/introduction.rst
@@ -56,7 +56,7 @@ In the home directory, one can build nrtSearch locally like this:
 
 ./gradlew clean installDist test
 
-Note: This code has been tested on *Java14*.
+Note: This code has been tested on *Java17*.
 
 Run gRPC Server
 ---------------------------
diff --git a/example-plugin/build.gradle b/example-plugin/build.gradle
index 14ecb9691..6722011cd 100644
--- a/example-plugin/build.gradle
+++ b/example-plugin/build.gradle
@@ -1,5 +1,5 @@
 plugins {
-    id 'com.diffplug.gradle.spotless' version '4.3.0'
+    id 'com.diffplug.spotless' version '6.22.0'
     id 'distribution'
     // Generate IntelliJ IDEA's .idea & .iml project files
     id 'idea'
@@ -14,8 +14,10 @@ repositories {
     mavenCentral()
 }
 
-sourceCompatibility = 1.17
-targetCompatibility = 1.17
+java {
+    sourceCompatibility = JavaVersion.VERSION_17
+    targetCompatibility = JavaVersion.VERSION_17
+}
 
 group 'com.yelp.nrtsearch.plugins'
 version '0.0.1'
diff --git a/example-plugin/gradle/wrapper/gradle-wrapper.jar b/example-plugin/gradle/wrapper/gradle-wrapper.jar
index 41d9927a4..7454180f2 100644
Binary files a/example-plugin/gradle/wrapper/gradle-wrapper.jar and b/example-plugin/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/example-plugin/gradle/wrapper/gradle-wrapper.properties b/example-plugin/gradle/wrapper/gradle-wrapper.properties
index e750102e0..e411586a5 100644
--- a/example-plugin/gradle/wrapper/gradle-wrapper.properties
+++ b/example-plugin/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.3-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index cc4fdc293..7454180f2 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index e750102e0..e411586a5 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.3-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradlew b/gradlew
index 2fe81a7d9..1b6c78733 100755
--- a/gradlew
+++ b/gradlew
@@ -1,7 +1,7 @@
-#!/usr/bin/env sh
+#!/bin/sh
 
 #
-# Copyright 2015 the original author or authors.
+# Copyright © 2015-2021 the original authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,78 +17,113 @@
 #
 ##############################################################################
 
-##
-##  Gradle start up script for UN*X
-##
+#
+#   Gradle start up script for POSIX generated by Gradle.
+#
+#   Important for running:
+#
+#   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
+#       noncompliant, but you have some other compliant shell such as ksh or
+#       bash, then to run this script, type that shell name before the whole
+#       command line, like:
+#
+#           ksh Gradle
+#
+#       Busybox and similar reduced shells will NOT work, because this script
+#       requires all of these POSIX shell features:
+#         * functions;
+#         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
+#           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
+#         * compound commands having a testable exit status, especially «case»;
+#         * various built-in commands including «command», «set», and «ulimit».
+#
+#   Important for patching:
+#
+#   (2) This script targets any POSIX shell, so it avoids extensions provided
+#       by Bash, Ksh, etc; in particular arrays are avoided.
+#
+#       The "traditional" practice of packing multiple parameters into a
+#       space-separated string is a well documented source of bugs and security
+#       problems, so this is (mostly) avoided, by progressively accumulating
+#       options in "$@", and eventually passing that to Java.
+#
+#       Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
+#       and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
+#       see the in-line comments for details.
+#
+#       There are tweaks for specific operating systems such as AIX, CygWin,
+#       Darwin, MinGW, and NonStop.
+#
+#   (3) This script is generated from the Groovy template
+#       https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
+#       within the Gradle project.
+#
+#       You can find Gradle at https://github.com/gradle/gradle/.
+#
 ##############################################################################
 
 # Attempt to set APP_HOME
+
 # Resolve links: $0 may be a link
-PRG="$0"
-# Need this for relative symlinks.
-while [ -h "$PRG" ] ; do
-    ls=`ls -ld "$PRG"`
-    link=`expr "$ls" : '.*-> \(.*\)$'`
-    if expr "$link" : '/.*' > /dev/null; then
-        PRG="$link"
-    else
-        PRG=`dirname "$PRG"`"/$link"
-    fi
+app_path=$0
+
+# Need this for daisy-chained symlinks.
+while
+    APP_HOME=${app_path%"${app_path##*/}"}  # leaves a trailing /; empty if no leading path
+    [ -h "$app_path" ]
+do
+    ls=$( ls -ld "$app_path" )
+    link=${ls#*' -> '}
+    case $link in             #(
+      /*)   app_path=$link ;; #(
+      *)    app_path=$APP_HOME$link ;;
+    esac
 done
-SAVED="`pwd`"
-cd "`dirname \"$PRG\"`/" >/dev/null
-APP_HOME="`pwd -P`"
-cd "$SAVED" >/dev/null
+
+APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit
 
 APP_NAME="Gradle"
-APP_BASE_NAME=`basename "$0"`
+APP_BASE_NAME=${0##*/}
 
 # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
 DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
-MAX_FD="maximum"
+MAX_FD=maximum
 
 warn () {
     echo "$*"
-}
+} >&2
 
 die () {
     echo
     echo "$*"
     echo
     exit 1
-}
+} >&2
 
 # OS specific support (must be 'true' or 'false').
 cygwin=false
 msys=false
 darwin=false
 nonstop=false
-case "`uname`" in
-  CYGWIN* )
-    cygwin=true
-    ;;
-  Darwin* )
-    darwin=true
-    ;;
-  MINGW* )
-    msys=true
-    ;;
-  NONSTOP* )
-    nonstop=true
-    ;;
+case "$( uname )" in                #(
+  CYGWIN* )         cygwin=true  ;; #(
+  Darwin* )         darwin=true  ;; #(
+  MSYS* | MINGW* )  msys=true    ;; #(
+  NONSTOP* )        nonstop=true ;;
 esac
 
 CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
 # Determine the Java command to use to start the JVM.
 if [ -n "$JAVA_HOME" ] ; then
     if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
         # IBM's JDK on AIX uses strange locations for the executables
-        JAVACMD="$JAVA_HOME/jre/sh/java"
+        JAVACMD=$JAVA_HOME/jre/sh/java
     else
-        JAVACMD="$JAVA_HOME/bin/java"
+        JAVACMD=$JAVA_HOME/bin/java
     fi
     if [ ! -x "$JAVACMD" ] ; then
         die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
@@ -97,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the
 location of your Java installation."
     fi
 else
-    JAVACMD="java"
+    JAVACMD=java
     which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
 
 Please set the JAVA_HOME variable in your environment to match the
@@ -105,79 +140,95 @@ location of your Java installation."
 fi
 
 # Increase the maximum file descriptors if we can.
-if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
-    MAX_FD_LIMIT=`ulimit -H -n`
-    if [ $? -eq 0 ] ; then
-        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
-            MAX_FD="$MAX_FD_LIMIT"
-        fi
-        ulimit -n $MAX_FD
-        if [ $? -ne 0 ] ; then
-            warn "Could not set maximum file descriptor limit: $MAX_FD"
-        fi
-    else
-        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
-    fi
+if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
+    case $MAX_FD in #(
+      max*)
+        MAX_FD=$( ulimit -H -n ) ||
+            warn "Could not query maximum file descriptor limit"
+    esac
+    case $MAX_FD in  #(
+      '' | soft) :;; #(
+      *)
+        ulimit -n "$MAX_FD" ||
+            warn "Could not set maximum file descriptor limit to $MAX_FD"
+    esac
 fi
 
-# For Darwin, add options to specify how the application appears in the dock
-if $darwin; then
-    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
-fi
+# Collect all arguments for the java command, stacking in reverse order:
+#   * args from the command line
+#   * the main class name
+#   * -classpath
+#   * -D...appname settings
+#   * --module-path (only if needed)
+#   * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
 
 # For Cygwin or MSYS, switch paths to Windows format before running java
-if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
-    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
-    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
-    JAVACMD=`cygpath --unix "$JAVACMD"`
-
-    # We build the pattern for arguments to be converted via cygpath
-    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
-    SEP=""
-    for dir in $ROOTDIRSRAW ; do
-        ROOTDIRS="$ROOTDIRS$SEP$dir"
-        SEP="|"
-    done
-    OURCYGPATTERN="(^($ROOTDIRS))"
-    # Add a user-defined pattern to the cygpath arguments
-    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
-        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
-    fi
+if "$cygwin" || "$msys" ; then
+    APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
+    CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
+
+    JAVACMD=$( cygpath --unix "$JAVACMD" )
+
     # Now convert the arguments - kludge to limit ourselves to /bin/sh
-    i=0
-    for arg in "$@" ; do
-        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
-        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
-
-        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
-            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
-        else
-            eval `echo args$i`="\"$arg\""
+    for arg do
+        if
+            case $arg in                                #(
+              -*)   false ;;                            # don't mess with options #(
+              /?*)  t=${arg#/} t=/${t%%/*}              # looks like a POSIX filepath
+                    [ -e "$t" ] ;;                      #(
+              *)    false ;;
+            esac
+        then
+            arg=$( cygpath --path --ignore --mixed "$arg" )
         fi
-        i=`expr $i + 1`
+        # Roll the args list around exactly as many times as the number of
+        # args, so each arg winds up back in the position where it started, but
+        # possibly modified.
+        #
+        # NB: a `for` loop captures its iteration list before it begins, so
+        # changing the positional parameters here affects neither the number of
+        # iterations, nor the values presented in `arg`.
+        shift                   # remove old arg
+        set -- "$@" "$arg"      # push replacement arg
     done
-    case $i in
-        0) set -- ;;
-        1) set -- "$args0" ;;
-        2) set -- "$args0" "$args1" ;;
-        3) set -- "$args0" "$args1" "$args2" ;;
-        4) set -- "$args0" "$args1" "$args2" "$args3" ;;
-        5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
-        6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
-        7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
-        8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
-        9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
-    esac
 fi
 
-# Escape application args
-save () {
-    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
-    echo " "
-}
-APP_ARGS=`save "$@"`
+# Collect all arguments for the java command;
+#   * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of
+#     shell script including quotes and variable substitutions, so put them in
+#     double quotes to make sure that they get re-expanded; and
+#   * put everything else in single quotes, so that it's not re-expanded.
+
+set -- \
+        "-Dorg.gradle.appname=$APP_BASE_NAME" \
+        -classpath "$CLASSPATH" \
+        org.gradle.wrapper.GradleWrapperMain \
+        "$@"
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+#   set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor command substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
 
-# Collect all arguments for the java command, following the shell quoting and substitution rules
-eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+eval "set -- $(
+        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+        xargs -n1 |
+        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+        tr '\n' ' '
    )" '"$@"'
 
 exec "$JAVACMD" "$@"
diff --git a/gradlew.bat b/gradlew.bat
index 24467a141..ac1b06f93 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -29,6 +29,9 @@ if "%DIRNAME%" == "" set DIRNAME=.
 set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
 set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
 
@@ -37,7 +40,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome
 
 set JAVA_EXE=java.exe
 %JAVA_EXE% -version >NUL 2>&1
-if "%ERRORLEVEL%" == "0" goto init
+if "%ERRORLEVEL%" == "0" goto execute
 
 echo.
 echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
@@ -51,7 +54,7 @@ goto fail
 
 set JAVA_HOME=%JAVA_HOME:"=%
 set JAVA_EXE=%JAVA_HOME%/bin/java.exe
 
-if exist "%JAVA_EXE%" goto init
+if exist "%JAVA_EXE%" goto execute
 
 echo.
 echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
@@ -61,28 +64,14 @@ echo location of your Java installation.
 
 goto fail
 
-:init
-@rem Get command-line arguments, handling Windows variants
-
-if not "%OS%" == "Windows_NT" goto win9xME_args
-
-:win9xME_args
-@rem Slurp the command line arguments.
-set CMD_LINE_ARGS=
-set _SKIP=2
-
-:win9xME_args_slurp
-if "x%~1" == "x" goto execute
-
-set CMD_LINE_ARGS=%*
-
 :execute
 @rem Setup the command line
 
 set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
 
+
 @rem Execute Gradle
-"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
 
 :end
 @rem End local scope for the variables with windows NT shell
diff --git a/grpc-gateway/Dockerfile b/grpc-gateway/Dockerfile
index f2412a487..2a7a8da17 100644
--- a/grpc-gateway/Dockerfile
+++ b/grpc-gateway/Dockerfile
@@ -1,9 +1,17 @@
-FROM azul/zulu-openjdk-debian:14
+FROM debian:12
 
 RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get install -y wget unzip htop \
     golang-go \
-    git
+    git \
+    gnupg2 software-properties-common
+
+RUN wget -O- https://apt.corretto.aws/corretto.key | apt-key add -
+RUN add-apt-repository -y 'deb https://apt.corretto.aws stable main'
+# For some reason, needs to be run again for the repo to be usable
+RUN add-apt-repository -y 'deb https://apt.corretto.aws stable main'
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y java-17-amazon-corretto-jdk
 
 # Install protoc
 ENV PROTOC_VERSION=3.11.4
@@ -45,6 +53,11 @@ RUN go get \
     github.com/golang/protobuf/protoc-gen-go@v${PROTOC_GEN_GO_VERSION} \
     google.golang.org/grpc@v${GRPC_VERSION}
 
+RUN go install \
+    github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v${GRPC_GATEWAY_VERSION} \
+    github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v${GRPC_GATEWAY_VERSION}
+RUN go install github.com/golang/protobuf/protoc-gen-go@v${PROTOC_GEN_GO_VERSION}
+
 ENV PROTO_PATH=/code/clientlib/src/main/proto
 ENV PROTO_BUILD_PATH=/code/clientlib/build
diff --git a/grpc-gateway/analysis.pb.go b/grpc-gateway/analysis.pb.go
index 2996321d1..11794dab6 100644
--- a/grpc-gateway/analysis.pb.go
+++ b/grpc-gateway/analysis.pb.go
@@ -281,6 +281,7 @@ type Analyzer struct {
	unknownFields protoimpl.UnknownFields

	// Types that are assignable to AnalyzerType:
+	//
	//	*Analyzer_Predefined
	//	*Analyzer_Custom
	AnalyzerType isAnalyzer_AnalyzerType `protobuf_oneof:"AnalyzerType"`
diff --git a/grpc-gateway/luceneserver.pb.go b/grpc-gateway/luceneserver.pb.go
index fd1b12342..d2c3011dd 100644
--- a/grpc-gateway/luceneserver.pb.go
+++ b/grpc-gateway/luceneserver.pb.go
@@ -32,7 +32,7 @@ const (
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )

-//Type of the field
+// Type of the field
 type FieldType int32

 const (
@@ -127,7 +127,7 @@ func (FieldType) EnumDescriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_luceneserver_proto_rawDescGZIP(), []int{0}
 }

-//How the tokens should be indexed.
+// How the tokens should be indexed.
 type IndexOptions int32

 const (
@@ -180,7 +180,7 @@ func (IndexOptions) EnumDescriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_luceneserver_proto_rawDescGZIP(), []int{1}
 }

-//Whether/how term vectors should be indexed.
+// Whether/how term vectors should be indexed.
 type TermVectors int32

 const (
@@ -236,7 +236,7 @@ func (TermVectors) EnumDescriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_luceneserver_proto_rawDescGZIP(), []int{2}
 }

-//Whether/How this field should index facets, and how.
+// Whether/How this field should index facets, and how.
 type FacetType int32

 const (
@@ -633,31 +633,31 @@ type LiveSettingsRequest struct {
	unknownFields protoimpl.UnknownFields

	IndexName string `protobuf:"bytes,1,opt,name=indexName,proto3" json:"indexName,omitempty"` // name of index whose liveSettings are to be updated.
-	//Longest time to wait before reopening IndexSearcher (i.e., periodic background reopen).
+	// Longest time to wait before reopening IndexSearcher (i.e., periodic background reopen).
	MaxRefreshSec float64 `protobuf:"fixed64,2,opt,name=maxRefreshSec,proto3" json:"maxRefreshSec,omitempty"`
-	//Shortest time to wait before reopening IndexSearcher (i.e., when a search is waiting for a specific indexGen).
+	// Shortest time to wait before reopening IndexSearcher (i.e., when a search is waiting for a specific indexGen).
	MinRefreshSec float64 `protobuf:"fixed64,3,opt,name=minRefreshSec,proto3" json:"minRefreshSec,omitempty"`
-	//Non-current searchers older than this are pruned.
+	// Non-current searchers older than this are pruned.
	MaxSearcherAgeSec float64 `protobuf:"fixed64,4,opt,name=maxSearcherAgeSec,proto3" json:"maxSearcherAgeSec,omitempty"`
-	//Size (in MB) of IndexWriter's RAM buffer.
+	// Size (in MB) of IndexWriter's RAM buffer.
	IndexRamBufferSizeMB float64 `protobuf:"fixed64,5,opt,name=indexRamBufferSizeMB,proto3" json:"indexRamBufferSizeMB,omitempty"`
-	//Max number of documents to add at a time.
+	// Max number of documents to add at a time.
	AddDocumentsMaxBufferLen int32 `protobuf:"varint,6,opt,name=addDocumentsMaxBufferLen,proto3" json:"addDocumentsMaxBufferLen,omitempty"`
-	//Maximum number of documents allowed in a parallel search slice.
+	// Maximum number of documents allowed in a parallel search slice.
	SliceMaxDocs int32 `protobuf:"varint,7,opt,name=sliceMaxDocs,proto3" json:"sliceMaxDocs,omitempty"`
-	//Maximum number of segments allowed in a parallel search slice.
+	// Maximum number of segments allowed in a parallel search slice.
	SliceMaxSegments int32 `protobuf:"varint,8,opt,name=sliceMaxSegments,proto3" json:"sliceMaxSegments,omitempty"`
-	//Number of virtual shards to use for this index.
+	// Number of virtual shards to use for this index.
	VirtualShards int32 `protobuf:"varint,9,opt,name=virtualShards,proto3" json:"virtualShards,omitempty"`
-	//Maximum sized segment to produce during normal merging
+	// Maximum sized segment to produce during normal merging
	MaxMergedSegmentMB int32 `protobuf:"varint,10,opt,name=maxMergedSegmentMB,proto3" json:"maxMergedSegmentMB,omitempty"`
-	//Number of segments per tier used by TieredMergePolicy
+	// Number of segments per tier used by TieredMergePolicy
	SegmentsPerTier int32 `protobuf:"varint,11,opt,name=segmentsPerTier,proto3" json:"segmentsPerTier,omitempty"`
-	//Timeout value to used when not specified in the search request.
+	// Timeout value to use when not specified in the search request.
	DefaultSearchTimeoutSec float64 `protobuf:"fixed64,12,opt,name=defaultSearchTimeoutSec,proto3" json:"defaultSearchTimeoutSec,omitempty"`
-	//Timeout check every value to use when not specified in the search request.
+	// Timeout check every value to use when not specified in the search request.
	DefaultSearchTimeoutCheckEvery int32 `protobuf:"varint,13,opt,name=defaultSearchTimeoutCheckEvery,proto3" json:"defaultSearchTimeoutCheckEvery,omitempty"`
-	//Terminate after value to use when not specified in the search request.
+	// Terminate after value to use when not specified in the search request.
	DefaultTerminateAfter int32 `protobuf:"varint,14,opt,name=defaultTerminateAfter,proto3" json:"defaultTerminateAfter,omitempty"`
 }
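The live-settings fields above map one-to-one onto a request a client can send. As a minimal, hedged sketch (not part of this diff): the `pb` import path and the v1 `LiveSettings` RPC wrapper are assumptions, while the field names and types come from the generated struct shown above.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Hypothetical import alias for the generated package in grpc-gateway/.
	pb "github.com/Yelp/nrtsearch/grpc-gateway"
)

func main() {
	// Placeholder address; point this at your lucene-server host:port.
	conn, err := grpc.Dial("localhost:8000", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := pb.NewLuceneServerClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Field names and types are taken from the LiveSettingsRequest struct above.
	_, err = client.LiveSettings(ctx, &pb.LiveSettingsRequest{
		IndexName:     "test_index",
		MaxRefreshSec: 1.0,  // longest wait before a background searcher reopen
		MinRefreshSec: 0.05, // shortest wait when a search is pending a specific indexGen
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The later sketches in this section reuse `conn`, `ctx`, and `client` from here.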
@@ -966,12 +966,12 @@ type Field struct {
	DocValuesFormat string `protobuf:"bytes,14,opt,name=docValuesFormat,proto3" json:"docValuesFormat,omitempty"` // Which DocValuesFormat should be used to index this field.
	IndexOptions IndexOptions `protobuf:"varint,15,opt,name=indexOptions,proto3,enum=luceneserver.IndexOptions" json:"indexOptions,omitempty"` //How the tokens should be indexed.
	Script *Script `protobuf:"bytes,16,opt,name=script,proto3" json:"script,omitempty"` // The script definition defining a virtual field's value (only used with type=virtual).
-	//TODO make analyzers message types i.e. StandardAnalyzer, EnglishAnalyzer, CustomAnalyzer etc
+	// TODO make analyzers message types i.e. StandardAnalyzer, EnglishAnalyzer, CustomAnalyzer etc
	Analyzer *Analyzer `protobuf:"bytes,17,opt,name=analyzer,proto3" json:"analyzer,omitempty"` // Analyzer to use for this field during indexing and searching.
	IndexAnalyzer *Analyzer `protobuf:"bytes,18,opt,name=indexAnalyzer,proto3" json:"indexAnalyzer,omitempty"` // Analyzer to use for this field during indexing.
	SearchAnalyzer *Analyzer `protobuf:"bytes,19,opt,name=searchAnalyzer,proto3" json:"searchAnalyzer,omitempty"` //Analyzer to use for this field during searching.
	TermVectors TermVectors `protobuf:"varint,20,opt,name=termVectors,proto3,enum=luceneserver.TermVectors" json:"termVectors,omitempty"` // Whether/how term vectors should be indexed.
-	//TODO make similarity message types i.d. DefaultSimilarity, CustomSimilarity, BM25Similarity;
+	// TODO make similarity message types i.e. DefaultSimilarity, CustomSimilarity, BM25Similarity;
	Similarity string `protobuf:"bytes,21,opt,name=similarity,proto3" json:"similarity,omitempty"` // Which Similarity implementation to use for this field.
	Facet FacetType `protobuf:"varint,22,opt,name=facet,proto3,enum=luceneserver.FacetType" json:"facet,omitempty"` // Whether this field should index facets, and how.
	FacetIndexFieldName string `protobuf:"bytes,23,opt,name=facetIndexFieldName,proto3" json:"facetIndexFieldName,omitempty"` // "Which underlying Lucene index field is used to hold any indexed taxonomy or sorted set doc values facets
@@ -2222,8 +2222,9 @@ type CommitResponse struct {
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

-	// sequence number of the last operation in the commit.  All sequence numbers less than this value
-	//will be reflected in the commit, and all others will not.
+	// sequence number of the last operation in the commit.  All sequence numbers less than this value
+	//
+	// will be reflected in the commit, and all others will not.
	Gen int64 `protobuf:"varint,1,opt,name=gen,proto3" json:"gen,omitempty"`
	// Unique identifier for the primary instance that processed the request
	PrimaryId string `protobuf:"bytes,2,opt,name=primaryId,proto3" json:"primaryId,omitempty"`
@@ -2329,9 +2330,9 @@ type StatsResponse struct {
	Ord int32 `protobuf:"varint,1,opt,name=ord,proto3" json:"ord,omitempty"` //shard ordinal
	// The total number of docs in this index, including docs not yet flushed (still in the RAM buffer),
-	//not counting deletions.
+	// not counting deletions.
	MaxDoc int32 `protobuf:"varint,2,opt,name=maxDoc,proto3" json:"maxDoc,omitempty"`
-	//*
+	// *
	// The total number of docs in this index, including
	// docs not yet flushed (still in the RAM buffer), and
	// including deletions.  NOTE: buffered deletions
@@ -2494,7 +2495,7 @@ type Searcher struct {
	unknownFields protoimpl.UnknownFields

	// the version recorded in the commit that the reader opened.
-	//This version is advanced every time a change is made with IndexWriter.
+	// This version is advanced every time a change is made with IndexWriter.
	Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
	NumDocs int32 `protobuf:"varint,2,opt,name=numDocs,proto3" json:"numDocs,omitempty"` //total number of docs in this index
	Segments string `protobuf:"bytes,3,opt,name=segments,proto3" json:"segments,omitempty"` //string representation of segments
@@ -2927,21 +2928,20 @@ func (*ReloadStateResponse) Descriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_luceneserver_proto_rawDescGZIP(), []int{35}
 }

-//
-//Creates a snapshot in the index, which is saved point-in-time view of the last commit in the
-//index such that no files referenced by that snapshot will be deleted by ongoing indexing until
-//the snapshot is released with @releaseSnapshot. Note that this will reference the last commit,
-//so be sure to call commit first if you have pending changes that you'd like to be included in
-//the snapshot. This can be used for backup purposes, i.e. after creating the snapshot you can
-//copy all referenced files to backup storage, and then release the snapshot once complete.
-//To restore the backup, just copy all the files back and restart the server. It can also
-//be used for transactional purposes, i.e. if you sometimes need to search a specific snapshot
-//instead of the current live index. Creating a snapshot is very fast (does not require any
-//file copying), but over time it will consume extra disk space as old segments are merged in
-//the index. Be sure to release the snapshot once you're done. Snapshots survive shutdown
-//and restart of the server. Returns all protected filenames referenced by this snapshot:
-//these files will not change and will not be deleted until the snapshot is released.
-//This returns the directories and files referenced by the snapshot.
+// Creates a snapshot in the index, which is a saved point-in-time view of the last commit in the
+// index such that no files referenced by that snapshot will be deleted by ongoing indexing until
+// the snapshot is released with @releaseSnapshot. Note that this will reference the last commit,
+// so be sure to call commit first if you have pending changes that you'd like to be included in
+// the snapshot. This can be used for backup purposes, i.e. after creating the snapshot you can
+// copy all referenced files to backup storage, and then release the snapshot once complete.
+// To restore the backup, just copy all the files back and restart the server. It can also
+// be used for transactional purposes, i.e. if you sometimes need to search a specific snapshot
+// instead of the current live index. Creating a snapshot is very fast (does not require any
+// file copying), but over time it will consume extra disk space as old segments are merged in
+// the index. Be sure to release the snapshot once you're done. Snapshots survive shutdown
+// and restart of the server. Returns all protected filenames referenced by this snapshot:
+// these files will not change and will not be deleted until the snapshot is released.
+// This returns the directories and files referenced by the snapshot.
 type CreateSnapshotRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
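The comment block above describes the full snapshot lifecycle (commit, snapshot, copy, release). A hedged Go sketch of that flow (not part of this diff) follows; only the method signatures appear in this section, so the request field names are assumptions, and `pb`/`client` come from the earlier sketch:

```go
// snapshotForBackup commits, snapshots, leaves a point where the caller can
// copy the protected files to backup storage, and then releases the snapshot.
func snapshotForBackup(ctx context.Context, client pb.LuceneServerClient) error {
	// A snapshot references the last commit, so commit pending changes first.
	// The IndexName message field name is an assumption (the message itself
	// appears elsewhere in this diff).
	if _, err := client.Commit(ctx, &pb.IndexName{IndexName: "test_index"}); err != nil {
		return err
	}
	snap, err := client.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{
		IndexName: "test_index", // assumed field name; not shown in this diff
	})
	if err != nil {
		return err
	}
	// The response lists the protected directories and files; copy them to
	// backup storage here before releasing.
	log.Printf("snapshot files: %v", snap)

	// Release so merged-away segments can be deleted again. The release
	// request also identifies the snapshot id returned above (fields not
	// shown in this excerpt).
	_, err = client.ReleaseSnapshot(ctx, &pb.ReleaseSnapshotRequest{
		IndexName: "test_index",
	})
	return err
}
```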
@@ -4389,7 +4389,7 @@ func (x *FileMetadata) GetFooter() []byte {
	return nil
 }

-//* Primary invokes this on a replica to ask it to copy files
+// * Primary invokes this on a replica to ask it to copy files
 type CopyFiles struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -4461,7 +4461,7 @@ func (x *CopyFiles) GetFilesMetadata() *FilesMetadata {
	return nil
 }

-//* Replica invokes this on a primary to let primary know it needs the CopyState
+// * Replica invokes this on a primary to let primary know it needs the CopyState
 type CopyStateRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -6110,14 +6110,14 @@ func (x *CustomResponse) GetResponse() map[string]string {
	return nil
 }

-//we use this wrapper object to represent each field as a multivalued field.
+// we use this wrapper object to represent each field as a multivalued field.
 type AddDocumentRequest_MultiValuedField struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` //list of values for this field
-	//Facet paths/hierarchy to bucket these values by, if indexed field is of type Facet.HIERARCHY
+	// Facet paths/hierarchy to bucket these values by, if indexed field is of type Facet.HIERARCHY
	FaceHierarchyPaths []*FacetHierarchyPath `protobuf:"bytes,2,rep,name=faceHierarchyPaths,proto3" json:"faceHierarchyPaths,omitempty"`
 }
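Since every document field travels through this wrapper, even single-valued fields are sent as a one-element list. A hedged sketch (the outer AddDocumentRequest field names are assumptions; only the wrapper struct appears in this diff):

```go
// Build one document; each field value is an AddDocumentRequest_MultiValuedField,
// exactly as the wrapper comment above describes.
doc := &pb.AddDocumentRequest{
	IndexName: "test_index", // assumed field name on the outer request
	Fields: map[string]*pb.AddDocumentRequest_MultiValuedField{
		"categories": {Value: []string{"restaurant", "bar"}},
		"rating":     {Value: []string{"4"}}, // single values still use the list form
	},
}
_ = doc // stream via the client's AddDocuments RPC (not shown in this excerpt)
```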
@@ -8838,20 +8838,20 @@ type LuceneServerClient interface {
	// Change global offline or online settings for this index.
	LiveSettingsV2(ctx context.Context, in *LiveSettingsV2Request, opts ...grpc.CallOption) (*LiveSettingsV2Response, error)
	// Registers one or more fields. Fields must be registered before they can be added in a document (via @addDocument).
-	//Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
-	//and once a field is registered it cannot be changed (write-once).
-	//This returns the full set of fields currently registered.
+	// Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
+	// and once a field is registered it cannot be changed (write-once).
+	// This returns the full set of fields currently registered.
	RegisterFields(ctx context.Context, in *FieldDefRequest, opts ...grpc.CallOption) (*FieldDefResponse, error)
	// Adds one or more fields. Fields must be registered before they can be added in a document (via @addDocument).
-	//Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
-	//and once a field is registered it cannot be changed (write-once).
-	//This returns the full set of fields currently registered.
+	// Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
+	// and once a field is registered it cannot be changed (write-once).
+	// This returns the full set of fields currently registered.
	UpdateFields(ctx context.Context, in *FieldDefRequest, opts ...grpc.CallOption) (*FieldDefResponse, error)
	// Change global offline settings for this index.
-	//This returns the currently set settings; pass no settings changes to retrieve current settings.
+	// This returns the currently set settings; pass no settings changes to retrieve current settings.
	Settings(ctx context.Context, in *SettingsRequest, opts ...grpc.CallOption) (*SettingsResponse, error)
	// Change global offline settings for this index.
-	//This returns the currently set settings; pass no settings to retrieve current settings.
+	// This returns the currently set settings; pass no settings to retrieve current settings.
	SettingsV2(ctx context.Context, in *SettingsV2Request, opts ...grpc.CallOption) (*SettingsV2Response, error)
	// Starts an index
	StartIndex(ctx context.Context, in *StartIndexRequest, opts ...grpc.CallOption) (*StartIndexResponse, error)
@@ -8887,22 +8887,21 @@ type LuceneServerClient interface {
	SuggestLookup(ctx context.Context, in *SuggestLookupRequest, opts ...grpc.CallOption) (*SuggestLookupResponse, error)
	// Updates existing suggestions, if the suggester supports near-real-time changes.
	UpdateSuggest(ctx context.Context, in *BuildSuggestRequest, opts ...grpc.CallOption) (*BuildSuggestResponse, error)
-	//
-	//Creates a snapshot in the index, which is saved point-in-time view of the last commit
-	//in the index such that no files referenced by that snapshot will be deleted by ongoing
-	//indexing until the snapshot is released with @releaseSnapshot. Note that this will
-	//reference the last commit, so be sure to call commit first if you have pending changes
-	//that you'd like to be included in the snapshot.
-	//This can be used for backup purposes, i.e. after creating the snapshot you can copy
-	//all referenced files to backup storage, and then release the snapshot once complete.
-	//To restore the backup, just copy all the files back and restart the server.
-	//It can also be used for transactional purposes, i.e. if you sometimes need to search a
-	//specific snapshot instead of the current live index. Creating a snapshot is very fast
-	//(does not require any file copying), but over time it will consume extra disk space as
-	//old segments are merged in the index. Be sure to release the snapshot once you're done.
-	//Snapshots survive shutdown and restart of the server. Returns all protected filenames
-	//referenced by this snapshot: these files will not change and will not be deleted until
-	//the snapshot is released. This returns the directories and files referenced by the snapshot.
+	// Creates a snapshot in the index, which is a saved point-in-time view of the last commit
+	// in the index such that no files referenced by that snapshot will be deleted by ongoing
+	// indexing until the snapshot is released with @releaseSnapshot. Note that this will
+	// reference the last commit, so be sure to call commit first if you have pending changes
+	// that you'd like to be included in the snapshot.
+	// This can be used for backup purposes, i.e. after creating the snapshot you can copy
+	// all referenced files to backup storage, and then release the snapshot once complete.
+	// To restore the backup, just copy all the files back and restart the server.
+	// It can also be used for transactional purposes, i.e. if you sometimes need to search a
+	// specific snapshot instead of the current live index. Creating a snapshot is very fast
+	// (does not require any file copying), but over time it will consume extra disk space as
+	// old segments are merged in the index. Be sure to release the snapshot once you're done.
+	// Snapshots survive shutdown and restart of the server. Returns all protected filenames
+	// referenced by this snapshot: these files will not change and will not be deleted until
+	// the snapshot is released. This returns the directories and files referenced by the snapshot.
	CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error)
	// releases a snapshot previously created with @createSnapshot.
	ReleaseSnapshot(ctx context.Context, in *ReleaseSnapshotRequest, opts ...grpc.CallOption) (*ReleaseSnapshotResponse, error)
@@ -8917,30 +8916,26 @@ type LuceneServerClient interface {
	State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
	// healthcheck
	Status(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
-	//
-	//Checks if a node is ready to receive traffic by checking if all the indices (which can be preloaded)
-	//are started. Can specify comma-separated list of index name to only check specific indices if needed.
+	// Checks if a node is ready to receive traffic by checking if all the indices (which can be preloaded)
+	// are started. Can specify a comma-separated list of index names to only check specific indices if needed.
	Ready(ctx context.Context, in *ReadyCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
	// metrics
	Metrics(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*httpbody.HttpBody, error)
	// indices
	Indices(ctx context.Context, in *IndicesRequest, opts ...grpc.CallOption) (*IndicesResponse, error)
-	//
-	//Forces merge policy to merge segments until there are <= maxNumSegments. The actual
-	//merges to be executed are determined by the MergePolicy. This call will merge those
-	//segments present in the index when the call started. If other threads are still
-	//adding documents and flushing segments, those newly created segments will not be
-	//merged unless you call forceMerge again.
+	// Forces merge policy to merge segments until there are <= maxNumSegments. The actual
+	// merges to be executed are determined by the MergePolicy. This call will merge those
+	// segments present in the index when the call started. If other threads are still
+	// adding documents and flushing segments, those newly created segments will not be
+	// merged unless you call forceMerge again.
	ForceMerge(ctx context.Context, in *ForceMergeRequest, opts ...grpc.CallOption) (*ForceMergeResponse, error)
-	//
-	//Forces merging of all segments that have deleted documents. The actual merges to be
-	//executed are determined by the MergePolicy. For example, the default TieredMergePolicy
-	//will only pick a segment if the percentage of deleted docs is over 10%.
-	//This method first flushes a new segment (if there are indexed documents), and applies
-	//all buffered deletes.
+	// Forces merging of all segments that have deleted documents. The actual merges to be
+	// executed are determined by the MergePolicy. For example, the default TieredMergePolicy
+	// will only pick a segment if the percentage of deleted docs is over 10%.
+	// This method first flushes a new segment (if there are indexed documents), and applies
+	// all buffered deletes.
	ForceMergeDeletes(ctx context.Context, in *ForceMergeDeletesRequest, opts ...grpc.CallOption) (*ForceMergeDeletesResponse, error)
-	//
-	//Process request in a plugin which implements CustomRequestPlugin interface.
+	// Process request in a plugin which implements CustomRequestPlugin interface.
	Custom(ctx context.Context, in *CustomRequest, opts ...grpc.CallOption) (*CustomResponse, error)
 }
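As a usage note for the readiness and merge RPCs documented above, a hedged Go sketch (the request field names are assumptions; the method signatures are the ones in the interface above, and `client`/`ctx` come from the earlier sketch):

```go
// Gate traffic on readiness of specific indices, then reduce segment count.
if _, err := client.Ready(ctx, &pb.ReadyCheckRequest{
	IndexNames: "test_index", // comma-separated, per the Ready comment above
}); err != nil {
	log.Fatalf("node not ready: %v", err)
}

// Merge down to a single segment. Segments created by concurrent indexing
// after this call starts are not merged, per the ForceMerge comment above.
if _, err := client.ForceMerge(ctx, &pb.ForceMergeRequest{
	IndexName:      "test_index",
	MaxNumSegments: 1,
}); err != nil {
	log.Fatalf("force merge failed: %v", err)
}
```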
@@ -9328,20 +9323,20 @@ type LuceneServerServer interface {
	// Change global offline or online settings for this index.
	LiveSettingsV2(context.Context, *LiveSettingsV2Request) (*LiveSettingsV2Response, error)
	// Registers one or more fields. Fields must be registered before they can be added in a document (via @addDocument).
-	//Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
-	//and once a field is registered it cannot be changed (write-once).
-	//This returns the full set of fields currently registered.
+	// Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
+	// and once a field is registered it cannot be changed (write-once).
+	// This returns the full set of fields currently registered.
	RegisterFields(context.Context, *FieldDefRequest) (*FieldDefResponse, error)
	// Adds one or more fields. Fields must be registered before they can be added in a document (via @addDocument).
-	//Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
-	//and once a field is registered it cannot be changed (write-once).
-	//This returns the full set of fields currently registered.
+	// Pass a list of Fields and an indexName. Any number of fields may be registered in a single request,
+	// and once a field is registered it cannot be changed (write-once).
+	// This returns the full set of fields currently registered.
	UpdateFields(context.Context, *FieldDefRequest) (*FieldDefResponse, error)
	// Change global offline settings for this index.
-	//This returns the currently set settings; pass no settings changes to retrieve current settings.
+	// This returns the currently set settings; pass no settings changes to retrieve current settings.
	Settings(context.Context, *SettingsRequest) (*SettingsResponse, error)
	// Change global offline settings for this index.
-	//This returns the currently set settings; pass no settings to retrieve current settings.
+	// This returns the currently set settings; pass no settings to retrieve current settings.
	SettingsV2(context.Context, *SettingsV2Request) (*SettingsV2Response, error)
	// Starts an index
	StartIndex(context.Context, *StartIndexRequest) (*StartIndexResponse, error)
@@ -9377,22 +9372,21 @@ type LuceneServerServer interface {
	SuggestLookup(context.Context, *SuggestLookupRequest) (*SuggestLookupResponse, error)
	// Updates existing suggestions, if the suggester supports near-real-time changes.
	UpdateSuggest(context.Context, *BuildSuggestRequest) (*BuildSuggestResponse, error)
-	//
-	//Creates a snapshot in the index, which is saved point-in-time view of the last commit
-	//in the index such that no files referenced by that snapshot will be deleted by ongoing
-	//indexing until the snapshot is released with @releaseSnapshot. Note that this will
-	//reference the last commit, so be sure to call commit first if you have pending changes
-	//that you'd like to be included in the snapshot.
-	//This can be used for backup purposes, i.e. after creating the snapshot you can copy
-	//all referenced files to backup storage, and then release the snapshot once complete.
-	//To restore the backup, just copy all the files back and restart the server.
-	//It can also be used for transactional purposes, i.e. if you sometimes need to search a
-	//specific snapshot instead of the current live index. Creating a snapshot is very fast
-	//(does not require any file copying), but over time it will consume extra disk space as
-	//old segments are merged in the index. Be sure to release the snapshot once you're done.
-	//Snapshots survive shutdown and restart of the server. Returns all protected filenames
-	//referenced by this snapshot: these files will not change and will not be deleted until
-	//the snapshot is released. This returns the directories and files referenced by the snapshot.
+	// Creates a snapshot in the index, which is a saved point-in-time view of the last commit
+	// in the index such that no files referenced by that snapshot will be deleted by ongoing
+	// indexing until the snapshot is released with @releaseSnapshot. Note that this will
+	// reference the last commit, so be sure to call commit first if you have pending changes
+	// that you'd like to be included in the snapshot.
+	// This can be used for backup purposes, i.e. after creating the snapshot you can copy
+	// all referenced files to backup storage, and then release the snapshot once complete.
+	// To restore the backup, just copy all the files back and restart the server.
+	// It can also be used for transactional purposes, i.e. if you sometimes need to search a
+	// specific snapshot instead of the current live index. Creating a snapshot is very fast
+	// (does not require any file copying), but over time it will consume extra disk space as
+	// old segments are merged in the index. Be sure to release the snapshot once you're done.
+	// Snapshots survive shutdown and restart of the server. Returns all protected filenames
+	// referenced by this snapshot: these files will not change and will not be deleted until
+	// the snapshot is released. This returns the directories and files referenced by the snapshot.
	CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error)
	// releases a snapshot previously created with @createSnapshot.
	ReleaseSnapshot(context.Context, *ReleaseSnapshotRequest) (*ReleaseSnapshotResponse, error)
@@ -9407,30 +9401,26 @@ type LuceneServerServer interface {
	State(context.Context, *StateRequest) (*StateResponse, error)
	// healthcheck
	Status(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
-	//
-	//Checks if a node is ready to receive traffic by checking if all the indices (which can be preloaded)
-	//are started. Can specify comma-separated list of index name to only check specific indices if needed.
+	// Checks if a node is ready to receive traffic by checking if all the indices (which can be preloaded)
+	// are started. Can specify a comma-separated list of index names to only check specific indices if needed.
	Ready(context.Context, *ReadyCheckRequest) (*HealthCheckResponse, error)
	// metrics
	Metrics(context.Context, *emptypb.Empty) (*httpbody.HttpBody, error)
	// indices
	Indices(context.Context, *IndicesRequest) (*IndicesResponse, error)
-	//
-	//Forces merge policy to merge segments until there are <= maxNumSegments. The actual
-	//merges to be executed are determined by the MergePolicy. This call will merge those
-	//segments present in the index when the call started. If other threads are still
-	//adding documents and flushing segments, those newly created segments will not be
-	//merged unless you call forceMerge again.
+	// Forces merge policy to merge segments until there are <= maxNumSegments. The actual
+	// merges to be executed are determined by the MergePolicy. This call will merge those
+	// segments present in the index when the call started. If other threads are still
+	// adding documents and flushing segments, those newly created segments will not be
+	// merged unless you call forceMerge again.
	ForceMerge(context.Context, *ForceMergeRequest) (*ForceMergeResponse, error)
-	//
-	//Forces merging of all segments that have deleted documents. The actual merges to be
-	//executed are determined by the MergePolicy. For example, the default TieredMergePolicy
-	//will only pick a segment if the percentage of deleted docs is over 10%.
-	//This method first flushes a new segment (if there are indexed documents), and applies
-	//all buffered deletes.
+	// Forces merging of all segments that have deleted documents. The actual merges to be
+	// executed are determined by the MergePolicy. For example, the default TieredMergePolicy
+	// will only pick a segment if the percentage of deleted docs is over 10%.
+	// This method first flushes a new segment (if there are indexed documents), and applies
+	// all buffered deletes.
	ForceMergeDeletes(context.Context, *ForceMergeDeletesRequest) (*ForceMergeDeletesResponse, error)
-	//
-	//Process request in a plugin which implements CustomRequestPlugin interface.
+	// Process request in a plugin which implements CustomRequestPlugin interface.
	Custom(context.Context, *CustomRequest) (*CustomResponse, error)
 }
@@ -10429,11 +10419,11 @@ type ReplicationServerClient interface {
	CopyFiles(ctx context.Context, in *CopyFiles, opts ...grpc.CallOption) (ReplicationServer_CopyFilesClient, error)
	// Invoked externally to replica, to notify it that a new NRT point was just created on the primary
	NewNRTPoint(ctx context.Context, in *NewNRTPoint, opts ...grpc.CallOption) (*TransferStatus, error)
-	//* Invoked externally to primary, to make all recent index operations searchable on the primary and, once copying is done, on the replicas
+	// * Invoked externally to primary, to make all recent index operations searchable on the primary and, once copying is done, on the replicas
	WriteNRTPoint(ctx context.Context, in *IndexName, opts ...grpc.CallOption) (*SearcherVersion, error)
-	//* Invoked externally to replica, to get the current Searcher version on replica.
+	// * Invoked externally to replica, to get the current Searcher version on replica.
	GetCurrentSearcherVersion(ctx context.Context, in *IndexName, opts ...grpc.CallOption) (*SearcherVersion, error)
-	//* Invoked externally on primary to find the list of replica nodes this node is connected to for binary replication per index
+	// * Invoked externally on primary to find the list of replica nodes this node is connected to for binary replication per index
	GetConnectedNodes(ctx context.Context, in *GetNodesRequest, opts ...grpc.CallOption) (*GetNodesResponse, error)
 }
@@ -10643,11 +10633,11 @@ type ReplicationServerServer interface {
	CopyFiles(*CopyFiles, ReplicationServer_CopyFilesServer) error
	// Invoked externally to replica, to notify it that a new NRT point was just created on the primary
	NewNRTPoint(context.Context, *NewNRTPoint) (*TransferStatus, error)
-	//* Invoked externally to primary, to make all recent index operations searchable on the primary and, once copying is done, on the replicas
+	// * Invoked externally to primary, to make all recent index operations searchable on the primary and, once copying is done, on the replicas
	WriteNRTPoint(context.Context, *IndexName) (*SearcherVersion, error)
-	//* Invoked externally to replica, to get the current Searcher version on replica.
+	// * Invoked externally to replica, to get the current Searcher version on replica.
	GetCurrentSearcherVersion(context.Context, *IndexName) (*SearcherVersion, error)
-	//* Invoked externally on primary to find the list of replica nodes this node is connected to for binary replication per index
+	// * Invoked externally on primary to find the list of replica nodes this node is connected to for binary replication per index
	GetConnectedNodes(context.Context, *GetNodesRequest) (*GetNodesResponse, error)
 }
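To illustrate the replication flow documented above: after indexing on the primary, an external caller publishes an NRT point and can then poll a replica for the same searcher version. A hedged sketch (the IndexName message fields are assumptions; the constructor name follows the usual generated-code convention):

```go
// Against the primary: make recent index operations searchable.
repl := pb.NewReplicationServerClient(conn)
ver, err := repl.WriteNRTPoint(ctx, &pb.IndexName{IndexName: "test_index"})
if err != nil {
	log.Fatal(err)
}

// Against a replica: check whether it has caught up to that version.
replicaVer, err := repl.GetCurrentSearcherVersion(ctx, &pb.IndexName{IndexName: "test_index"})
if err != nil {
	log.Fatal(err)
}
log.Printf("primary=%v replica=%v", ver, replicaVer)
```

In practice the two calls go to different hosts; a single `conn` is used here only to keep the sketch short.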
diff --git a/grpc-gateway/search.pb.go b/grpc-gateway/search.pb.go
index 792747b32..6008ebcf5 100644
--- a/grpc-gateway/search.pb.go
+++ b/grpc-gateway/search.pb.go
@@ -649,7 +649,7 @@ func (Script_ParamNullValue) EnumDescriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_search_proto_rawDescGZIP(), []int{27, 0}
 }

-//* How the {TotalHits#value} should be interpreted.
+// * How the {TotalHits#value} should be interpreted.
 type TotalHits_Relation int32

 const (
@@ -698,7 +698,7 @@ func (TotalHits_Relation) EnumDescriptor() ([]byte, []int) {
	return file_yelp_nrtsearch_search_proto_rawDescGZIP(), []int{31, 0}
 }

-//Sorting order type
+// Sorting order type
 type BucketOrder_OrderType int32

 const (
@@ -989,7 +989,7 @@ type PhraseQuery struct {
	unknownFields protoimpl.UnknownFields

	// Edit distance between respective positions of terms as defined in this PhraseQuery and the positions
-	//of terms in a document.
+	// of terms in a document.
	Slop int32 `protobuf:"varint,1,opt,name=slop,proto3" json:"slop,omitempty"`
	Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` // The field in the index that this query applies to.
	Terms []string `protobuf:"bytes,3,rep,name=terms,proto3" json:"terms,omitempty"` // Terms to match.
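The Slop comment above is easiest to read with a concrete query. A hedged sketch using the PhraseQuery fields shown here and the Query/QueryNode oneof listed later in this diff:

```go
// Matches documents where "great" and "pizza" appear within one position of
// each other in review_text (slop = edit distance between term positions).
q := &pb.Query{
	QueryNode: &pb.Query_PhraseQuery{
		PhraseQuery: &pb.PhraseQuery{
			Slop:  1,
			Field: "review_text",
			Terms: []string{"great", "pizza"},
		},
	},
}
_ = q // embed in a SearchRequest (see the SearchRequest fields below)
```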
@@ -1228,7 +1228,6 @@ func (x *FunctionFilterQuery) GetScript() *Script {
	return nil
 }

-//
 type NestedQuery struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
@@ -1301,6 +1300,7 @@ type TermQuery struct {
	// Field in the document to query.
	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
	// Types that are assignable to TermTypes:
+	//
	//	*TermQuery_TextValue
	//	*TermQuery_IntValue
	//	*TermQuery_LongValue
@@ -1453,6 +1453,7 @@ type TermInSetQuery struct {
	// Field in the document to query.
	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
	// Types that are assignable to TermTypes:
+	//
	//	*TermInSetQuery_TextTerms_
	//	*TermInSetQuery_IntTerms_
	//	*TermInSetQuery_LongTerms_
@@ -2555,6 +2556,7 @@ type Query struct {
	QueryType QueryType `protobuf:"varint,1,opt,name=queryType,proto3,enum=luceneserver.QueryType" json:"queryType,omitempty"` // no longer needed, type inferred from set QueryNode
	Boost float32 `protobuf:"fixed32,2,opt,name=boost,proto3" json:"boost,omitempty"` // Boost values that are less than one will give less importance to this query compared to other ones while values that are greater than one will give more importance to the scores returned by this query. Boost value of zero will do nothing (default). Boost less than 0 is invalid.
	// Types that are assignable to QueryNode:
+	//
	//	*Query_BooleanQuery
	//	*Query_PhraseQuery
	//	*Query_FunctionScoreQuery
@@ -2927,6 +2929,7 @@ type SearchRequest struct {
	Query *Query `protobuf:"bytes,8,opt,name=query,proto3" json:"query,omitempty"` // Full query to execute using QueryNodes
	QuerySort *QuerySortField `protobuf:"bytes,9,opt,name=querySort,proto3" json:"querySort,omitempty"` //Sort hits by field (default is by relevance).
	// Types that are assignable to Searcher:
+	//
	//	*SearchRequest_IndexGen
	//	*SearchRequest_Version
	//	*SearchRequest_Snapshot
@@ -2937,15 +2940,15 @@ type SearchRequest struct {
	DisallowPartialResults bool `protobuf:"varint,16,opt,name=disallowPartialResults,proto3" json:"disallowPartialResults,omitempty"` //Should partial result be a failure condition. Applies when a search request times out. If false, the top documents ranking at the point of timeout are used and the request continues. Also, hitTimeout is set to true in the response.
	QueryNestedPath string `protobuf:"bytes,17,opt,name=queryNestedPath,proto3" json:"queryNestedPath,omitempty"` //nested path we want to query by if we want to query child documents.
	Rescorers []*Rescorer `protobuf:"bytes,18,rep,name=rescorers,proto3" json:"rescorers,omitempty"` // Rescorers which are executed in-order after the first pass
-	//If detailed request execution profiling should be included in the response
+	// If detailed request execution profiling should be included in the response
	Profile bool `protobuf:"varint,19,opt,name=profile,proto3" json:"profile,omitempty"`
-	//Check the search timeout condition after each collection of n documents in a segment. If 0, timeout is only checked on the segment boundary.
+	// Check the search timeout condition after each collection of n documents in a segment. If 0, timeout is only checked on the segment boundary.
	TimeoutCheckEvery int32 `protobuf:"varint,20,opt,name=timeoutCheckEvery,proto3" json:"timeoutCheckEvery,omitempty"`
-	//Additional document collectors. Provides support for operations such as aggregation.
+	// Additional document collectors. Provides support for operations such as aggregation.
	Collectors map[string]*Collector `protobuf:"bytes,21,rep,name=collectors,proto3" json:"collectors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	//Stop document collection in search phase after this many documents, 0 for unlimited.
+ // Stop document collection in search phase after this many documents, 0 for unlimited.
TerminateAfter int32 `protobuf:"varint,22,opt,name=terminateAfter,proto3" json:"terminateAfter,omitempty"`
- //Set gRPC compression codec to use for response message. If value is unset or invalid, falls back to uncompressed. Valid codecs: identity, gzip, lz4
+ // Set gRPC compression codec to use for response message. If value is unset or invalid, falls back to uncompressed. Valid codecs: identity, gzip, lz4
ResponseCompression string `protobuf:"bytes,23,opt,name=responseCompression,proto3" json:"responseCompression,omitempty"`
// Specify how to highlight matched text
Highlight *Highlight `protobuf:"bytes,24,opt,name=highlight,proto3" json:"highlight,omitempty"`
@@ -3550,8 +3553,8 @@ type SortType struct {
Selector Selector `protobuf:"varint,2,opt,name=selector,proto3,enum=luceneserver.Selector" json:"selector,omitempty"` // For multi valued fields, how to select which value is used for sorting
Origin *Point `protobuf:"bytes,3,opt,name=origin,proto3" json:"origin,omitempty"` // For distance sort, the point that we measure distance from
// Whether missing values should sort last instead of first.
- //Note that this runs \"before\" reverse, so if you sort missing first and reverse=true then missing values will
- //be at the end.
+ // Note that this runs \"before\" reverse, so if you sort missing first and reverse=true then missing values will
+ // be at the end.
MissingLat bool `protobuf:"varint,4,opt,name=missingLat,proto3" json:"missingLat,omitempty"`
// Sort in reverse of the field's natural order
Reverse bool `protobuf:"varint,5,opt,name=reverse,proto3" json:"reverse,omitempty"`
@@ -3630,7 +3633,7 @@ type TotalHits struct {
unknownFields protoimpl.UnknownFields
Relation TotalHits_Relation `protobuf:"varint,1,opt,name=relation,proto3,enum=luceneserver.TotalHits_Relation" json:"relation,omitempty"`
- //* The value of the total hit count. Must be interpreted in the context of * {#relation}.
+ // * The value of the total hit count. Must be interpreted in the context of * {#relation}.
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
}
@@ -4365,6 +4368,7 @@ type Rescorer struct {
WindowSize int32 `protobuf:"varint,1,opt,name=windowSize,proto3" json:"windowSize,omitempty"`
// Types that are assignable to Rescorers:
+ //
// *Rescorer_QueryRescorer
// *Rescorer_PluginRescorer
Rescorers isRescorer_Rescorers `protobuf_oneof:"Rescorers"`
@@ -4527,20 +4531,21 @@ func (x *ProfileResult) GetDrillDownQuery() string {
return ""
}
-//Definition of additional document collector.
+// Definition of additional document collector.
type Collector struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Collectors:
+ //
// *Collector_Terms
// *Collector_PluginCollector
// *Collector_TopHitsCollector
// *Collector_Filter
// *Collector_Max
Collectors isCollector_Collectors `protobuf_oneof:"Collectors"`
- //Nested collectors that define sub-aggregations per bucket, supported by bucket based collectors.
+ // Nested collectors that define sub-aggregations per bucket, supported by bucket based collectors.
NestedCollectors map[string]*Collector `protobuf:"bytes,3,rep,name=nestedCollectors,proto3" json:"nestedCollectors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
@@ -4630,7 +4635,7 @@ type isCollector_Collectors interface {
}
type Collector_Terms struct {
- //Collector for aggregating based on term values.
+ // Collector for aggregating based on term values.
Terms *TermsCollector `protobuf:"bytes,1,opt,name=terms,proto3,oneof"`
}
@@ -4639,17 +4644,17 @@ type Collector_PluginCollector struct {
}
type Collector_TopHitsCollector struct {
- //Collector for getting top hits based on score or sorting.
+ // Collector for getting top hits based on score or sorting.
TopHitsCollector *TopHitsCollector `protobuf:"bytes,4,opt,name=topHitsCollector,proto3,oneof"`
}
type Collector_Filter struct {
- //Collector that filters documents to nested collectors
+ // Collector that filters documents to nested collectors
Filter *FilterCollector `protobuf:"bytes,5,opt,name=filter,proto3,oneof"`
}
type Collector_Max struct {
- //Collector for finding a max double value from collected documents.
+ // Collector for finding a max double value from collected documents.
Max *MaxCollector `protobuf:"bytes,6,opt,name=max,proto3,oneof"`
}
@@ -4719,19 +4724,20 @@ func (x *PluginCollector) GetParams() *structpb.Struct {
return nil
}
-//Definition of term aggregating collector.
+// Definition of term aggregating collector.
type TermsCollector struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to TermsSource:
+ //
// *TermsCollector_Field
// *TermsCollector_Script
TermsSource isTermsCollector_TermsSource `protobuf_oneof:"TermsSource"`
- //Maximum number of top terms to return.
+ // Maximum number of top terms to return.
Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
- //How results Buckets should be ordered, defaults to descending Bucket _count.
+ // How results Buckets should be ordered, defaults to descending Bucket _count.
Order *BucketOrder `protobuf:"bytes,4,opt,name=order,proto3" json:"order,omitempty"`
}
@@ -4807,12 +4813,12 @@ type isTermsCollector_TermsSource interface {
}
type TermsCollector_Field struct {
- //Use field values for terms.
+ // Use field values for terms.
Field string `protobuf:"bytes,1,opt,name=field,proto3,oneof"`
}
type TermsCollector_Script struct {
- //Use FacetScript definition to produce terms.
+ // Use FacetScript definition to produce terms.
Script *Script `protobuf:"bytes,2,opt,name=script,proto3,oneof"`
}
@@ -4820,19 +4826,19 @@ func (*TermsCollector_Field) isTermsCollector_TermsSource() {}
func (*TermsCollector_Script) isTermsCollector_TermsSource() {}
-//Definition of top hits based collector.
+// Definition of top hits based collector.
type TopHitsCollector struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- //Offset for retrieval of top hits.
+ // Offset for retrieval of top hits.
StartHit int32 `protobuf:"varint,1,opt,name=startHit,proto3" json:"startHit,omitempty"`
- //Total hits to collect, note that the number of hits returned is (topHits - startHit).
+ // Total hits to collect, note that the number of hits returned is (topHits - startHit).
TopHits int32 `protobuf:"varint,2,opt,name=topHits,proto3" json:"topHits,omitempty"`
- //When specified, collector does sort based collection. Otherwise, relevance score is used.
+ // When specified, collector does sort based collection. Otherwise, relevance score is used.
QuerySort *QuerySortField `protobuf:"bytes,3,opt,name=querySort,proto3" json:"querySort,omitempty"`
- //Which fields to retrieve.
+ // Which fields to retrieve.
RetrieveFields []string `protobuf:"bytes,4,rep,name=retrieveFields,proto3" json:"retrieveFields,omitempty"`
// If Lucene explanation should be included in the collector response
Explain bool `protobuf:"varint,5,opt,name=explain,proto3" json:"explain,omitempty"`
@@ -4905,13 +4911,14 @@ func (x *TopHitsCollector) GetExplain() bool {
return false
}
-//Definition of filtering collector, there must be at least one nested collector specified in the Collector message.
+// Definition of filtering collector, there must be at least one nested collector specified in the Collector message.
type FilterCollector struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Filter:
+ //
// *FilterCollector_Query
// *FilterCollector_SetQuery
Filter isFilterCollector_Filter `protobuf_oneof:"Filter"`
@@ -4988,13 +4995,14 @@ func (*FilterCollector_Query) isFilterCollector_Filter() {}
func (*FilterCollector_SetQuery) isFilterCollector_Filter() {}
-//Definition of collector to find a max double value over documents. Currently only allows for script based value production.
+// Definition of collector to find a max double value over documents. Currently only allows for script based value production.
type MaxCollector struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to ValueSource:
+ //
// *MaxCollector_Script
ValueSource isMaxCollector_ValueSource `protobuf_oneof:"ValueSource"`
}
@@ -5050,7 +5058,7 @@ type isMaxCollector_ValueSource interface {
}
type MaxCollector_Script struct {
- //Script to produce a double value
+ // Script to produce a double value
Script *Script `protobuf:"bytes,1,opt,name=script,proto3,oneof"`
}
@@ -5062,6 +5070,7 @@ type CollectorResult struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to CollectorResults:
+ //
// *CollectorResult_BucketResult
// *CollectorResult_AnyResult
// *CollectorResult_HitsResult
@@ -5149,27 +5158,27 @@ type isCollectorResult_CollectorResults interface {
}
type CollectorResult_BucketResult struct {
- //Result of collector that produces buckets and counts.
+ // Result of collector that produces buckets and counts.
BucketResult *BucketResult `protobuf:"bytes,1,opt,name=bucketResult,proto3,oneof"`
}
type CollectorResult_AnyResult struct {
- //Flexible collector result for additional document collectors
+ // Flexible collector result for additional document collectors
AnyResult *anypb.Any `protobuf:"bytes,2,opt,name=anyResult,proto3,oneof"`
}
type CollectorResult_HitsResult struct {
- //Result of collector that returns document hits.
+ // Result of collector that returns document hits.
HitsResult *HitsResult `protobuf:"bytes,4,opt,name=hitsResult,proto3,oneof"`
}
type CollectorResult_FilterResult struct {
- //Result of collector that filters documents.
+ // Result of collector that filters documents.
FilterResult *FilterResult `protobuf:"bytes,5,opt,name=filterResult,proto3,oneof"`
}
type CollectorResult_DoubleResult struct {
- //Result of collector that produces a single double value.
+ // Result of collector that produces a single double value.
DoubleResult *wrapperspb.DoubleValue `protobuf:"bytes,6,opt,name=doubleResult,proto3,oneof"`
}
@@ -5183,15 +5192,15 @@ func (*CollectorResult_FilterResult) isCollectorResult_CollectorResults() {}
func (*CollectorResult_DoubleResult) isCollectorResult_CollectorResults() {}
-//Defines how Buckets should be ordered in BucketResult.
+// Defines how Buckets should be ordered in BucketResult.
type BucketOrder struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- //What to use for sorting. This can be _count for Bucket count, or the name of a nested collector that supports ordering.
+ // What to use for sorting. This can be _count for Bucket count, or the name of a nested collector that supports ordering.
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- //Sorting order
+ // Sorting order
Order BucketOrder_OrderType `protobuf:"varint,2,opt,name=order,proto3,enum=luceneserver.BucketOrder_OrderType" json:"order,omitempty"`
}
@@ -5247,9 +5256,9 @@ type BucketResult struct {
unknownFields protoimpl.UnknownFields
Buckets []*BucketResult_Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"`
- //Number of unique buckets, including those not in the buckets list.
+ // Number of unique buckets, including those not in the buckets list.
TotalBuckets int32 `protobuf:"varint,2,opt,name=totalBuckets,proto3" json:"totalBuckets,omitempty"`
- //Number of other collected counts not represented in the buckets' counts.
+ // Number of other collected counts not represented in the buckets' counts.
TotalOtherCounts int32 `protobuf:"varint,3,opt,name=totalOtherCounts,proto3" json:"totalOtherCounts,omitempty"`
}
@@ -5311,9 +5320,9 @@ type HitsResult struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- //Total hit information.
+ // Total hit information.
TotalHits *TotalHits `protobuf:"bytes,3,opt,name=totalHits,proto3" json:"totalHits,omitempty"`
- //Ordered hits with scoring/sorting info and retrieved fields.
+ // Ordered hits with scoring/sorting info and retrieved fields.
Hits []*SearchResponse_Hit `protobuf:"bytes,4,rep,name=hits,proto3" json:"hits,omitempty"`
}
@@ -5368,9 +5377,9 @@ type FilterResult struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- //Number of documents that passed the filter.
+ // Number of documents that passed the filter.
DocCount int32 `protobuf:"varint,1,opt,name=docCount,proto3" json:"docCount,omitempty"`
- //Results from nested collectors.
+ // Results from nested collectors.
NestedCollectorResults map[string]*CollectorResult `protobuf:"bytes,2,rep,name=nestedCollectorResults,proto3" json:"nestedCollectorResults,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
@@ -5829,6 +5838,7 @@ type MultiFunctionScoreQuery_FilterFunction struct {
// Function to produce score, will be 1.0 if none are set
//
// Types that are assignable to Function:
+ //
// *MultiFunctionScoreQuery_FilterFunction_Script
Function isMultiFunctionScoreQuery_FilterFunction_Function `protobuf_oneof:"Function"`
}
@@ -5912,6 +5922,7 @@ type Script_ParamValue struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to ParamValues:
+ //
// *Script_ParamValue_TextValue
// *Script_ParamValue_BooleanValue
// *Script_ParamValue_IntValue
@@ -6572,6 +6583,7 @@ type SearchResponse_Hit_FieldValue struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to FieldValues:
+ //
// *SearchResponse_Hit_FieldValue_TextValue
// *SearchResponse_Hit_FieldValue_BooleanValue
// *SearchResponse_Hit_FieldValue_IntValue
@@ -7174,7 +7186,7 @@ type BucketResult_Bucket struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
- //Nested collector results for sub-aggregations of this bucket.
+ // Nested collector results for sub-aggregations of this bucket.
NestedCollectorResults map[string]*CollectorResult `protobuf:"bytes,8,rep,name=nestedCollectorResults,proto3" json:"nestedCollectorResults,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
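
To make the collector plumbing above concrete, here is a hedged Java sketch that builds a `SearchRequest` with a terms aggregation ordered by bucket count, using the `collectors`, `terminateAfter`, and `responseCompression` fields documented in this file. It assumes the generated protobuf-java builders for these messages; the index and field names are placeholders, and the `DESC` constant is an assumption about `BucketOrder.OrderType`.

```java
import com.yelp.nrtsearch.server.grpc.BucketOrder;
import com.yelp.nrtsearch.server.grpc.Collector;
import com.yelp.nrtsearch.server.grpc.SearchRequest;
import com.yelp.nrtsearch.server.grpc.TermsCollector;

public class TermsAggregationExample {
  public static SearchRequest buildRequest() {
    // Terms aggregation on a field: top 10 buckets, descending bucket count.
    TermsCollector terms =
        TermsCollector.newBuilder()
            .setField("category") // use field values for terms
            .setSize(10) // maximum number of top terms to return
            .setOrder(
                BucketOrder.newBuilder()
                    .setKey("_count") // order by Bucket count
                    .setOrder(BucketOrder.OrderType.DESC)
                    .build())
            .build();

    return SearchRequest.newBuilder()
        .setIndexName("test_index")
        .setTopHits(0) // only the aggregation is of interest here
        .putCollectors("categories", Collector.newBuilder().setTerms(terms).build())
        .setTerminateAfter(0) // 0 = no early cutoff in the collection phase
        .setResponseCompression("gzip") // valid codecs: identity, gzip, lz4
        .build();
  }
}
```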
diff --git a/grpc-gateway/suggest.pb.go b/grpc-gateway/suggest.pb.go
index 06f59d2a5..f7559d20f 100644
--- a/grpc-gateway/suggest.pb.go
+++ b/grpc-gateway/suggest.pb.go
@@ -29,6 +29,7 @@ type BuildSuggestRequest struct {
IndexName string `protobuf:"bytes,1,opt,name=indexName,proto3" json:"indexName,omitempty"` //index name
// Types that are assignable to Suggester:
+ //
// *BuildSuggestRequest_InfixSuggester
// *BuildSuggestRequest_AnalyzingSuggester
// *BuildSuggestRequest_FuzzySuggester
@@ -36,6 +37,7 @@ type BuildSuggestRequest struct {
// *BuildSuggestRequest_FuzzyInfixSuggester
Suggester isBuildSuggestRequest_Suggester `protobuf_oneof:"Suggester"`
// Types that are assignable to Source:
+ //
// *BuildSuggestRequest_LocalSource
// *BuildSuggestRequest_NonLocalSource
Source isBuildSuggestRequest_Source `protobuf_oneof:"Source"`
@@ -409,6 +411,7 @@ type OneSuggestLookupResponse struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to HighlightKey:
+ //
// *OneSuggestLookupResponse_SuggestLookupHighlight
// *OneSuggestLookupResponse_Key
HighlightKey isOneSuggestLookupResponse_HighlightKey `protobuf_oneof:"HighlightKey"`
@@ -609,8 +612,8 @@ type SuggestLocalSource struct {
unknownFields protoimpl.UnknownFields
// Local file (to the server) to read suggestions + weights from; format is weight U+001F suggestion U+001F payload,
- //one per line, with suggestion UTF-8 encoded. If this option is used then searcher, suggestField,
- //weightField/Expression, payloadField should not be specified.
+ // one per line, with suggestion UTF-8 encoded. If this option is used then searcher, suggestField,
+ // weightField/Expression, payloadField should not be specified.
LocalFile string `protobuf:"bytes,1,opt,name=localFile,proto3" json:"localFile,omitempty"`
HasContexts bool `protobuf:"varint,2,opt,name=hasContexts,proto3" json:"hasContexts,omitempty"` //True if this file provides per-suggestion contexts
HasPayload bool `protobuf:"varint,3,opt,name=hasPayload,proto3" json:"hasPayload,omitempty"` //True if this file provides per-suggestion payload
@@ -684,12 +687,14 @@ type SuggestNonLocalSource struct {
// Specific searcher version to use for pull suggestions to build. There are three different ways to specify a searcher version.
// Types that are assignable to Searcher:
+ //
// *SuggestNonLocalSource_IndexGen
// *SuggestNonLocalSource_Version
// *SuggestNonLocalSource_Snapshot
Searcher isSuggestNonLocalSource_Searcher `protobuf_oneof:"Searcher"`
SuggestField string `protobuf:"bytes,4,opt,name=suggestField,proto3" json:"suggestField,omitempty"` //Field (from stored documents) containing the suggestion text
// Types that are assignable to Weight:
+ //
// *SuggestNonLocalSource_WeightField
// *SuggestNonLocalSource_WeightExpression
Weight isSuggestNonLocalSource_Weight `protobuf_oneof:"Weight"`
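
A hedged sketch of building a suggester from a local file, exercising the `Suggester` and `Source` oneofs above. Message and setter names follow the generated protobuf-java conventions but are assumptions; the index name and file path are placeholders.

```java
import com.yelp.nrtsearch.server.grpc.BuildSuggestRequest;
import com.yelp.nrtsearch.server.grpc.FuzzySuggester;
import com.yelp.nrtsearch.server.grpc.SuggestLocalSource;

public class BuildSuggestExample {
  public static BuildSuggestRequest buildRequest() {
    // One entry from each oneof: a fuzzy suggester fed from a local file whose
    // lines are "weight U+001F suggestion U+001F payload".
    return BuildSuggestRequest.newBuilder()
        .setIndexName("test_index")
        .setFuzzySuggester(FuzzySuggester.newBuilder().build()) // suggester defaults
        .setLocalSource(
            SuggestLocalSource.newBuilder()
                .setLocalFile("/path/to/suggest_input") // placeholder path
                .setHasPayload(true)
                .build())
        .build();
  }
}
```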
diff --git a/src/main/java/com/yelp/nrtsearch/server/backup/ContentDownloader.java b/src/main/java/com/yelp/nrtsearch/server/backup/ContentDownloader.java
index ba355b986..77a427bc1 100644
--- a/src/main/java/com/yelp/nrtsearch/server/backup/ContentDownloader.java
+++ b/src/main/java/com/yelp/nrtsearch/server/backup/ContentDownloader.java
@@ -31,12 +31,18 @@ void getVersionContent(
final String serviceName, final String resource, final String hash, final Path destDirectory)
throws IOException;
- /** @return amazonS3 Client used by this ContentDownloader */
+ /**
+ * @return amazonS3 Client used by this ContentDownloader
+ */
AmazonS3 getS3Client();
- /** @return bucketName used by this ContentDownloader */
+ /**
+ * @return bucketName used by this ContentDownloader
+ */
String getBucketName();
- /** @return boolean to indicate if this ContentDownloader operates in stream mode */
+ /**
+ * @return boolean to indicate if this ContentDownloader operates in stream mode
+ */
boolean downloadAsStream();
}
diff --git a/src/main/java/com/yelp/nrtsearch/server/grpc/LuceneServerClient.java b/src/main/java/com/yelp/nrtsearch/server/grpc/LuceneServerClient.java
index a91804e47..cacd0d026 100644
--- a/src/main/java/com/yelp/nrtsearch/server/grpc/LuceneServerClient.java
+++ b/src/main/java/com/yelp/nrtsearch/server/grpc/LuceneServerClient.java
@@ -490,7 +490,9 @@ public void deleteIndexBackup(
}
public void parseDocumentField(DocumentsContext documentsContext, List<String> fieldValues, List<List<String>> facetHierarchyPaths) {
parseFieldWithChildren(documentsContext.getRootDocument(), fieldValues, facetHierarchyPaths);
}
+
/**
* Parse a list of field values for this field and its children. The values will be those present
* in a {@link com.yelp.nrtsearch.server.grpc.AddDocumentRequest.MultiValuedField}.
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/field/properties/Bindable.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/field/properties/Bindable.java
index d76358798..b9dcbadf2 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/field/properties/Bindable.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/field/properties/Bindable.java
@@ -23,6 +23,7 @@
*/
public interface Bindable {
String VALUE_PROPERTY = "value";
+
/**
* Get {@link DoubleValuesSource} to produce values per document when this field is bound into a
* lucene {@link org.apache.lucene.expressions.Expression} script.
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/highlights/HighlighterService.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/highlights/HighlighterService.java
index b3066d122..7fa683051 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/highlights/HighlighterService.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/highlights/HighlighterService.java
@@ -50,6 +50,7 @@ private static void initializeBuiltinHighlighters() {
NRTFastVectorHighlighter nrtFastVectorHighlighter = NRTFastVectorHighlighter.getInstance();
instance.register(nrtFastVectorHighlighter.getName(), nrtFastVectorHighlighter);
}
+
/**
* Initialize singleton instance of {@link HighlighterService}. Registers all builtin highlighter
* and any additional highlighter provided by {@link HighlighterPlugin}s.
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/index/BackendStateManager.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/index/BackendStateManager.java
index 8450ab250..48bd6771b 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/index/BackendStateManager.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/index/BackendStateManager.java
@@ -208,9 +208,7 @@ public synchronized void start(
logger.info("Doing initial commit for index: " + indexName);
currentState.commit(globalState.getConfiguration().getBackupWithInArchiver());
IndexStateInfo updatedStateInfo =
- currentState
- .getCurrentStateInfo()
- .toBuilder()
+ currentState.getCurrentStateInfo().toBuilder()
.setCommitted(true)
.setGen(currentState.getCurrentStateInfo().getGen() + 1)
.build();
@@ -259,8 +257,7 @@ private static IndexStateInfo mergeSettings(
IndexStateInfo currentStateInfo, IndexSettings settings) {
IndexSettings mergedSettings =
ImmutableIndexState.mergeSettings(currentStateInfo.getSettings(), settings);
- return currentStateInfo
- .toBuilder()
+ return currentStateInfo.toBuilder()
.setSettings(mergedSettings)
.setGen(currentStateInfo.getGen() + 1)
.build();
@@ -270,8 +267,7 @@ private static IndexStateInfo mergeLiveSettings(
IndexStateInfo currentStateInfo, IndexLiveSettings liveSettings) {
IndexLiveSettings mergedLiveSettings =
ImmutableIndexState.mergeLiveSettings(currentStateInfo.getLiveSettings(), liveSettings);
- return currentStateInfo
- .toBuilder()
+ return currentStateInfo.toBuilder()
.setLiveSettings(mergedLiveSettings)
.setGen(currentStateInfo.getGen() + 1)
.build();
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/HitCountCollector.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/HitCountCollector.java
index e72e77267..1ecf10266 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/HitCountCollector.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/HitCountCollector.java
@@ -35,6 +35,7 @@
*/
public class HitCountCollector extends DocCollector {
private final HitCountCollectorManager manager;
+
/**
* Constructor
*
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/ScriptTermsCollectorManager.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/ScriptTermsCollectorManager.java
index 8afbeae54..4daa6f0e6 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/ScriptTermsCollectorManager.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/ScriptTermsCollectorManager.java
@@ -46,6 +46,7 @@
/** Collector manager that aggregates terms from a {@link FacetScript} into buckets. */
public class ScriptTermsCollectorManager extends TermsCollectorManager {
private final FacetScript.SegmentFactory scriptFactory;
+
/**
* Constructor.
*
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/TermsCollectorManager.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/TermsCollectorManager.java
index b9cf0b5ff..0b003bb28 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/TermsCollectorManager.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/search/collectors/additional/TermsCollectorManager.java
@@ -221,6 +221,7 @@ public void setSearchContext(SearchContext searchContext) {
nestedCollectorManagers.setSearchContext(searchContext);
}
}
+
/** Get max number of buckets to return */
public int getSize() {
return size;
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/state/BackendGlobalState.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/state/BackendGlobalState.java
index f06bd99d2..209955932 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/state/BackendGlobalState.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/state/BackendGlobalState.java
@@ -61,6 +61,7 @@ public class BackendGlobalState extends GlobalState {
private static final Logger logger = LoggerFactory.getLogger(BackendGlobalState.class);
private int resolvedReplicationPort;
+
/**
* State class containing immutable persistent and ephemeral global state, stored together so that
* they can be updated atomically.
@@ -290,9 +291,7 @@ public synchronized IndexState createIndex(CreateIndexRequest createIndexRequest
}
GlobalStateInfo updatedState =
- immutableState
- .globalStateInfo
- .toBuilder()
+ immutableState.globalStateInfo.toBuilder()
.putIndices(indexName, newIndexState)
.setGen(immutableState.globalStateInfo.getGen() + 1)
.build();
@@ -328,9 +327,7 @@ public IndexStateManager getIndexStateManager(String name) throws IOException {
@Override
public synchronized void deleteIndex(String name) throws IOException {
GlobalStateInfo updatedState =
- immutableState
- .globalStateInfo
- .toBuilder()
+ immutableState.globalStateInfo.toBuilder()
.removeIndices(name)
.setGen(immutableState.globalStateInfo.getGen() + 1)
.build();
@@ -372,12 +369,9 @@ && getConfiguration()
.getResourceName()
.equals(startIndexRequest.getIndexName())) {
request =
- startIndexRequest
- .toBuilder()
+ startIndexRequest.toBuilder()
.setRestore(
- startIndexRequest
- .getRestore()
- .toBuilder()
+ startIndexRequest.getRestore().toBuilder()
.setResourceName(
getUniqueIndexName(
startIndexRequest.getIndexName(), indexGlobalState.getId()))
@@ -392,9 +386,7 @@ && getConfiguration()
if (startIndexRequest.getMode() != Mode.REPLICA && !indexGlobalState.getStarted()) {
IndexGlobalState updatedIndexState = indexGlobalState.toBuilder().setStarted(true).build();
GlobalStateInfo updatedGlobalState =
- immutableState
- .globalStateInfo
- .toBuilder()
+ immutableState.globalStateInfo.toBuilder()
.putIndices(startIndexRequest.getIndexName(), updatedIndexState)
.setGen(immutableState.globalStateInfo.getGen() + 1)
.build();
@@ -419,9 +411,7 @@ public synchronized StartIndexResponse startIndexV2(StartIndexV2Request startInd
if (getConfiguration().getIndexStartConfig().getMode() != Mode.REPLICA) {
GlobalStateInfo updatedGlobalState =
- immutableState
- .globalStateInfo
- .toBuilder()
+ immutableState.globalStateInfo.toBuilder()
.putIndices(startIndexRequest.getIndexName(), updatedIndexGlobalState)
.setGen(immutableState.globalStateInfo.getGen() + 1)
.build();
@@ -471,9 +461,7 @@ public synchronized DummyResponse stopIndex(StopIndexRequest stopIndexRequest)
.build();
GlobalStateInfo updatedState =
- immutableState
- .globalStateInfo
- .toBuilder()
+ immutableState.globalStateInfo.toBuilder()
.putIndices(stopIndexRequest.getIndexName(), updatedIndexState)
.setGen(immutableState.globalStateInfo.getGen() + 1)
.build();
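
The reshuffled chains above all follow one copy-on-write pattern: protobuf state messages are immutable, so every update clones the current message with `toBuilder()`, applies the change, and bumps the generation before swapping the result in. A minimal sketch, assuming `GlobalStateInfo` and `IndexGlobalState` are the generated messages used in this file:

```java
import com.yelp.nrtsearch.server.grpc.GlobalStateInfo;
import com.yelp.nrtsearch.server.grpc.IndexGlobalState;

public class StateUpdateSketch {
  // Clone-modify-build: the caller atomically replaces its reference with the result.
  static GlobalStateInfo markStarted(GlobalStateInfo current, String indexName) {
    IndexGlobalState started =
        current.getIndicesOrThrow(indexName).toBuilder().setStarted(true).build();
    return current.toBuilder()
        .putIndices(indexName, started)
        .setGen(current.getGen() + 1)
        .build();
  }
}
```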
diff --git a/src/main/java/com/yelp/nrtsearch/server/luceneserver/warming/ReservoirSampler.java b/src/main/java/com/yelp/nrtsearch/server/luceneserver/warming/ReservoirSampler.java
index 10b7af614..8247ddd53 100644
--- a/src/main/java/com/yelp/nrtsearch/server/luceneserver/warming/ReservoirSampler.java
+++ b/src/main/java/com/yelp/nrtsearch/server/luceneserver/warming/ReservoirSampler.java
@@ -29,7 +29,9 @@ public class ReservoirSampler {
private final long maxQueries;
private final AtomicLong numQueries;
- /** @param maxQueries Maximum number of warming queries. */
+ /**
+ * @param maxQueries Maximum number of warming queries.
+ */
public ReservoirSampler(long maxQueries) {
this.maxQueries = maxQueries;
this.numQueries = new AtomicLong(0);
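
For context, the class name refers to classic reservoir sampling (Algorithm R): keep the first `maxQueries` items, then accept item n with probability maxQueries / n, evicting a uniformly random slot. An illustrative sketch of the algorithm only, not the server's implementation:

```java
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

public class ReservoirSketch {
  private final String[] reservoir;
  private final AtomicLong numSeen = new AtomicLong();

  public ReservoirSketch(int maxQueries) {
    this.reservoir = new String[maxQueries];
  }

  public void offer(String query) {
    long n = numSeen.incrementAndGet();
    if (n <= reservoir.length) {
      // Fill phase: the first maxQueries items are always kept.
      reservoir[(int) n - 1] = query;
    } else {
      // Replacement phase: item n survives with probability maxQueries / n.
      long slot = ThreadLocalRandom.current().nextLong(n);
      if (slot < reservoir.length) {
        reservoir[(int) slot] = query;
      }
    }
  }
}
```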
diff --git a/src/main/java/com/yelp/nrtsearch/server/plugins/MetricsPlugin.java b/src/main/java/com/yelp/nrtsearch/server/plugins/MetricsPlugin.java
index e924d32a5..456126271 100644
--- a/src/main/java/com/yelp/nrtsearch/server/plugins/MetricsPlugin.java
+++ b/src/main/java/com/yelp/nrtsearch/server/plugins/MetricsPlugin.java
@@ -23,6 +23,8 @@
*/
public interface MetricsPlugin {
- /** @param collectorRegistry Nrtsearch Prometheus collector registry. */
+ /**
+ * @param collectorRegistry Nrtsearch Prometheus collector registry.
+ */
void registerMetrics(CollectorRegistry collectorRegistry);
}
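
A hedged example of a plugin using this hook: `Counter` and `CollectorRegistry` come from the Prometheus Java simpleclient, the metric name is made up, and a real nrtsearch plugin would also extend the server's plugin base class.

```java
import com.yelp.nrtsearch.server.plugins.MetricsPlugin;
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Counter;

// Hypothetical metrics plugin: creates a counter once and registers it with
// the registry the server passes in.
public class ExampleMetricsPlugin implements MetricsPlugin {
  private final Counter queriesServed =
      Counter.build()
          .name("example_plugin_queries_served_total")
          .help("Queries served, as counted by ExampleMetricsPlugin.")
          .create();

  @Override
  public void registerMetrics(CollectorRegistry collectorRegistry) {
    collectorRegistry.register(queriesServed);
  }
}
```

The plugin would then call `queriesServed.inc()` wherever it does work; the server scrapes the registry as usual.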
diff --git a/src/main/java/org/apache/lucene/search/suggest/document/Completion90PostingsFormat.java b/src/main/java/org/apache/lucene/search/suggest/document/Completion90PostingsFormat.java
index 76bc144c2..7da2411e9 100644
--- a/src/main/java/org/apache/lucene/search/suggest/document/Completion90PostingsFormat.java
+++ b/src/main/java/org/apache/lucene/search/suggest/document/Completion90PostingsFormat.java
@@ -26,6 +26,7 @@
*/
public class Completion90PostingsFormat extends CompletionPostingsFormat {
private static final Logger logger = LoggerFactory.getLogger(Completion90PostingsFormat.class);
+
/**
* Creates a {@link Completion90PostingsFormat} that will load the completion FST based on the
* value present in {@link CompletionPostingsFormatUtil}.
diff --git a/src/main/java/org/apache/lucene/search/suggest/document/MyContextQuery.java b/src/main/java/org/apache/lucene/search/suggest/document/MyContextQuery.java
index e003263c8..4e9cf6629 100644
--- a/src/main/java/org/apache/lucene/search/suggest/document/MyContextQuery.java
+++ b/src/main/java/org/apache/lucene/search/suggest/document/MyContextQuery.java
@@ -51,6 +51,7 @@ public class MyContextQuery extends ContextQuery {
private IntsRefBuilder scratch = new IntsRefBuilder();
private Map