improve storm selector consistency

by filtering puzzles before sampling
Thibault Duplessis 2021-01-26 11:35:33 +01:00
parent 1e272929b5
commit b5a4d350e5
2 changed files with 51 additions and 52 deletions
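
What changed, in short: the old pipeline sampled ids per rating bucket, pooled them, looked up the puzzles, filtered them (rating deviation and color, per the in-code comment), and only then sampled poolSize from whatever survived; the new one looks up and filters inside each bucket and samples a fixed puzzlesPerBucket there, so every bucket contributes the same number of puzzles on each run. A minimal, self-contained sketch of that idea in plain Scala (not the lila aggregation code; Puzzle, bucketOf, select and StormSelectionSketch are illustrative names):

import scala.util.Random

// Hypothetical stand-in for a puzzle document: id, glicko rating and rating deviation.
final case class Puzzle(id: String, rating: Int, deviation: Int)

object StormSelectionSketch {

  val poolSize         = 130
  val ratings          = (1000 to 2800 by 150).toList
  val puzzlesPerBucket = poolSize / ratings.size
  val maxDeviation     = 90

  // Assign a candidate to the closest rating bucket.
  private def bucketOf(p: Puzzle): Int =
    ratings.minBy(r => math.abs(r - p.rating))

  // Filter first, then sample a fixed count per bucket: a bucket full of
  // high-deviation puzzles still yields at most puzzlesPerBucket entries,
  // which keeps the size and rating spread of the selection consistent across runs.
  def select(candidates: List[Puzzle]): List[Puzzle] =
    candidates
      .filter(_.deviation <= maxDeviation)
      .groupBy(bucketOf)
      .valuesIterator
      .flatMap(bucket => Random.shuffle(bucket).take(puzzlesPerBucket))
      .toList
      .sortBy(_.rating)
}

The real pipeline additionally oversamples before the lookup (puzzlesPerBucket * 6 ids per bucket) so that enough candidates survive the deviation and color filters.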

StormHighApi.scala

@@ -78,7 +78,6 @@ final class StormHighApi(coll: Coll, cacheApi: CacheApi)(implicit ctx: Execution
             ) -> Nil
           }
           .map2 { doc =>
-            println(lila.db.BSON.debug(doc))
             def readScore(doc: Bdoc, field: String) =
               ~doc.getAsOpt[List[Bdoc]](field).flatMap(_.headOption).flatMap(_.getAsOpt[Int]("score"))
             StormHigh(

StormSelector.scala

@@ -22,9 +22,11 @@ final class StormSelector(colls: PuzzleColls, cacheApi: CacheApi)(implicit ec: E
   private val poolSize = 130
   private val theme = lila.puzzle.PuzzleTheme.mix.key.value
   private val tier = lila.puzzle.PuzzleTier.Good.key
+  private val maxDeviation = 90
   private val ratings = (1000 to 2800 by 150).toList
   private val ratingBuckets = ratings.size
+  private val puzzlesPerBucket = poolSize / ratingBuckets
 
   private val current = cacheApi.unit[List[StormPuzzle]] {
     _.refreshAfterWrite(6 seconds)
@@ -39,34 +41,26 @@ final class StormSelector(colls: PuzzleColls, cacheApi: CacheApi)(implicit ec: E
                   Match(
                     $doc(
                       "min" $lte f"${theme}_${tier}_${rating}%04d",
-                      "max" $gt f"${theme}_${tier}_${rating}%04d"
+                      "max" $gte f"${theme}_${tier}_${rating}%04d"
                     )
                   ),
                   Project($doc("_id" -> false, "ids" -> true)),
                   Sample(1),
                   UnwindField("ids"),
-                  Sample((poolSize * 5) / ratingBuckets),
-                  Group(BSONNull)("ids" -> PushField("ids"))
-                )
-              }
-            ) -> List(
-              Project($doc("all" -> $doc("$setUnion" -> ratings.map(r => s"$$$r")))),
-              UnwindField("all"),
-              UnwindField("all.ids"),
-              Project($doc("id" -> "$all.ids")),
+                  Sample(puzzlesPerBucket * 6), // ensure we have enough after filtering deviation & color
                   PipelineOperator(
                     $doc(
                       "$lookup" -> $doc(
                         "from" -> colls.puzzle.name.value,
                         "as" -> "puzzle",
-                        "let" -> $doc("id" -> "$id"),
+                        "let" -> $doc("id" -> "$ids"),
                         "pipeline" -> $arr(
                           $doc(
                             "$match" -> $doc(
                               "$expr" -> $doc(
                                 "$and" -> $arr(
                                   $doc("$eq" -> $arr("$_id", "$$id")),
-                                  $doc("$lt" -> $arr("$glicko.d", 90)),
+                                  $doc("$lte" -> $arr("$glicko.d", maxDeviation)),
                                   $doc(
                                     "$regexMatch" -> $doc(
                                       "input" -> "$fen",
@@ -91,12 +85,18 @@ final class StormSelector(colls: PuzzleColls, cacheApi: CacheApi)(implicit ec: E
                     )
                   ),
                   UnwindField("puzzle"),
-                  ReplaceRootField("puzzle"),
-                  Sample(poolSize),
+                  Sample(puzzlesPerBucket),
+                  ReplaceRootField("puzzle")
+                )
+              }
+            ) -> List(
+              Project($doc("all" -> $doc("$setUnion" -> ratings.map(r => s"$$$r")))),
+              UnwindField("all"),
+              ReplaceRootField("all"),
               Sort(Ascending("rating"))
             )
-          }.map { docs =>
-            docs.flatMap(StormPuzzleBSONReader.readOpt)
+          }.map {
+            _.flatMap(StormPuzzleBSONReader.readOpt)
           }
         }
         .mon(_.storm.selector.time)